/*
 * videobuf2-v4l2.c - V4L2 driver helper framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>

#include <media/videobuf2-v4l2.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(q, level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			pr_info("vb2-v4l2: [%p] %s: " fmt,		\
				(q)->name, __func__, ## arg);		\
	} while (0)

/* Flags that are set by us */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_IN_REQUEST | \
				 V4L2_BUF_FLAG_REQUEST_FD | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | \
				 V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | \
				 V4L2_BUF_FLAG_TIMECODE | \
				 V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF)

/*
 * __verify_planes_array() - verify that the planes array passed in struct
 * v4l2_buffer from userspace can be safely used
 */
static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
		return 0;

	/* Is memory for copying plane information present? */
	if (b->m.planes == NULL) {
		dprintk(vb->vb2_queue, 1,
			"multi-planar buffer passed but planes array not provided\n");
		return -EINVAL;
	}

	if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
		dprintk(vb->vb2_queue, 1,
			"incorrect planes array length, expected %d, got %d\n",
			vb->num_planes, b->length);
		return -EINVAL;
	}

	return 0;
}

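/*
 * Wrapper with the (vb, pb) prototype expected by struct vb2_buf_ops, so the
 * vb2 core can reach the V4L2-specific planes check via .verify_planes_array.
 */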
static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
{
	return __verify_planes_array(vb, pb);
}

/*
 * __verify_length() - Verify that the bytesused value for each plane fits in
 * the plane length and that the data offset doesn't exceed the bytesused value.
 */
static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	unsigned int length;
	unsigned int bytesused;
	unsigned int plane;

	if (V4L2_TYPE_IS_CAPTURE(b->type))
		return 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		for (plane = 0; plane < vb->num_planes; ++plane) {
			length = (b->memory == VB2_MEMORY_USERPTR ||
				  b->memory == VB2_MEMORY_DMABUF)
				? b->m.planes[plane].length
				: vb->planes[plane].length;
			bytesused = b->m.planes[plane].bytesused
				  ? b->m.planes[plane].bytesused : length;

			if (b->m.planes[plane].bytesused > length)
				return -EINVAL;

			if (b->m.planes[plane].data_offset > 0 &&
			    b->m.planes[plane].data_offset >= bytesused)
				return -EINVAL;
		}
	} else {
		length = (b->memory == VB2_MEMORY_USERPTR)
			? b->length : vb->planes[0].length;

		if (b->bytesused > length)
			return -EINVAL;
	}

	return 0;
}

/*
 * __init_vb2_v4l2_buffer() - initialize the vb2_v4l2_buffer struct
 */
static void __init_vb2_v4l2_buffer(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->request_fd = -1;
}

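/*
 * Copy timestamp information from the v4l2_buffer supplied by userspace:
 * for output buffers the timestamp is copied when the queue uses
 * V4L2_BUF_FLAG_TIMESTAMP_COPY, and the timecode is copied when the
 * TIMECODE flag is set.
 */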
static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
{
	const struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;

	if (q->is_output) {
		/*
		 * For output buffers copy the timestamp if needed,
		 * and the timecode field and flag if needed.
		 */
		if (q->copy_timestamp)
			vb->timestamp = v4l2_buffer_get_timestamp(b);
		vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
			vbuf->timecode = b->timecode;
	}
}

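/*
 * Warn once that passing bytesused == 0 for an output buffer is deprecated,
 * and point userspace at the replacement that matches the queue's
 * allow_zero_bytesused setting.
 */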
static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
	static bool check_once;

	if (check_once)
		return;

	check_once = true;

	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
	if (vb->vb2_queue->allow_zero_bytesused)
		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
	else
		pr_warn("use the actual size instead.\n");
}

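/*
 * Fill the vb2_v4l2_buffer from the v4l2_buffer passed in by userspace:
 * verify the plane parameters, copy the memory-type specific fields
 * (userptr, dmabuf fd or mmap offset), lengths, bytesused and the buffer
 * flags that userspace is allowed to set.
 */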
static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_plane *planes = vbuf->planes;
	unsigned int plane;
	int ret;

	ret = __verify_length(vb, b);
	if (ret < 0) {
		dprintk(q, 1, "plane parameters verification failed: %d\n", ret);
		return ret;
	}
	if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
		/*
		 * If the format's field is ALTERNATE, then the buffer's field
		 * should be either TOP or BOTTOM, not ALTERNATE since that
		 * makes no sense. The driver has to know whether the
		 * buffer represents a top or a bottom field in order to
		 * program any DMA correctly. Using ALTERNATE is wrong, since
		 * that just says that it is either a top or a bottom field,
		 * but not which of the two it is.
		 */
		dprintk(q, 1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
		return -EINVAL;
	}
	vbuf->sequence = 0;
	vbuf->request_fd = -1;
	vbuf->is_held = false;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		case VB2_MEMORY_DMABUF:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		default:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.offset =
					vb->planes[plane].m.offset;
				planes[plane].length =
					vb->planes[plane].length;
			}
			break;
		}

		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Will have to go up to b->length when API starts
			 * accepting variable number of planes.
			 *
			 * If bytesused == 0 for the output buffer, then fall
			 * back to the full buffer size. In that case
			 * userspace clearly never bothered to set it and
			 * it's a safe assumption that they really meant to
			 * use the full plane sizes.
			 *
			 * Some drivers, e.g. old codec drivers, use bytesused == 0
			 * as a way to indicate that streaming is finished.
			 * In that case, the driver should use the
			 * allow_zero_bytesused flag to keep old userspace
			 * applications working.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				struct vb2_plane *pdst = &planes[plane];
				struct v4l2_plane *psrc = &b->m.planes[plane];

				if (psrc->bytesused == 0)
					vb2_warn_zero_bytesused(vb);

				if (vb->vb2_queue->allow_zero_bytesused)
					pdst->bytesused = psrc->bytesused;
				else
					pdst->bytesused = psrc->bytesused ?
						psrc->bytesused : pdst->length;
				pdst->data_offset = psrc->data_offset;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use planes array,
		 * so fill in relevant v4l2_buffer struct fields instead.
		 * In videobuf we use our internal v4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 *
		 * If bytesused == 0 for the output buffer, then fall back
		 * to the full buffer size as that's a sensible default.
		 *
		 * Some drivers, e.g. old codec drivers, use bytesused == 0 as
		 * a way to indicate that streaming is finished. In that case,
		 * the driver should use the allow_zero_bytesused flag to keep
		 * old userspace applications working.
		 */
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			planes[0].m.userptr = b->m.userptr;
			planes[0].length = b->length;
			break;
		case VB2_MEMORY_DMABUF:
			planes[0].m.fd = b->m.fd;
			planes[0].length = b->length;
			break;
		default:
			planes[0].m.offset = vb->planes[0].m.offset;
			planes[0].length = vb->planes[0].length;
			break;
		}

		planes[0].data_offset = 0;
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_COMPAT_32BIT_TIME) && \
    IS_ENABLED(CONFIG_USB_F_UVC)
		if (b->memory == VB2_MEMORY_DMABUF)
			planes[0].data_offset = b->reserved2;
#endif

		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			if (b->bytesused == 0)
				vb2_warn_zero_bytesused(vb);

			if (vb->vb2_queue->allow_zero_bytesused)
				planes[0].bytesused = b->bytesused;
			else
				planes[0].bytesused = b->bytesused ?
					b->bytesused : planes[0].length;
		} else
			planes[0].bytesused = 0;

	}

	/* Zero flags that we handle */
	vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if (!vb->vb2_queue->copy_timestamp || V4L2_TYPE_IS_CAPTURE(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so that needs to be copied here.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vbuf->field = b->field;
		if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
			vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
		/* Zero last flag, this is a signal from driver to userspace */
		vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
	}

	return 0;
}

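/*
 * Derive the per-buffer cache sync requirements from the queue's memory
 * type, its DMA direction and the V4L2_BUF_FLAG_NO_CACHE_* hints passed
 * by userspace (if the queue allows such hints).
 */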
static void set_buffer_cache_hints(struct vb2_queue *q,
				   struct vb2_buffer *vb,
				   struct v4l2_buffer *b)
{
	/*
	 * DMA exporter should take care of cache syncs, so we can avoid
	 * explicit ->prepare()/->finish() syncs. For other ->memory types
	 * we always need ->prepare() or/and ->finish() cache sync.
	 */
	if (q->memory == VB2_MEMORY_DMABUF) {
		vb->need_cache_sync_on_finish = 0;
		vb->need_cache_sync_on_prepare = 0;
		return;
	}

	/*
	 * Cache sync/invalidation flags are set by default in order to
	 * preserve existing behaviour for old apps/drivers.
	 */
	vb->need_cache_sync_on_prepare = 1;
	vb->need_cache_sync_on_finish = 1;

	if (!vb2_queue_allows_cache_hints(q)) {
		/*
		 * Clear buffer cache flags if queue does not support user
		 * space hints. That's to indicate to userspace that these
		 * flags won't work.
		 */
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_CLEAN;
		return;
	}

	/*
	 * ->finish() cache sync can be avoided when queue direction is
	 * TO_DEVICE.
	 */
	if (q->dma_dir == DMA_TO_DEVICE)
		vb->need_cache_sync_on_finish = 0;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE)
		vb->need_cache_sync_on_finish = 0;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN)
		vb->need_cache_sync_on_prepare = 0;
}

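/*
 * Common validation for VIDIOC_PREPARE_BUF and VIDIOC_QBUF: check the
 * buffer type, index and memory model, fill the vb2 buffer if it was not
 * prepared yet and, when the buffer is queued as part of a request, look
 * up and sanity-check the media request referenced by request_fd.
 */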
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
				    struct v4l2_buffer *b, bool is_prepare,
				    struct media_request **p_req)
{
	const char *opname = is_prepare ? "prepare_buf" : "qbuf";
	struct media_request *req;
	struct vb2_v4l2_buffer *vbuf;
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "%s: invalid buffer type\n", opname);
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "%s: buffer index out of range\n", opname);
		return -EINVAL;
	}

	if (q->bufs[b->index] == NULL) {
		/* Should never happen */
		dprintk(q, 1, "%s: buffer is NULL\n", opname);
		return -EINVAL;
	}

	if (b->memory != q->memory) {
		dprintk(q, 1, "%s: invalid memory type\n", opname);
		return -EINVAL;
	}

	vb = q->bufs[b->index];
	vbuf = to_vb2_v4l2_buffer(vb);
	ret = __verify_planes_array(vb, b);
	if (ret)
		return ret;

	if (!is_prepare && (b->flags & V4L2_BUF_FLAG_REQUEST_FD) &&
	    vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "%s: buffer is not in dequeued state\n", opname);
		return -EINVAL;
	}

	if (!vb->prepared) {
		set_buffer_cache_hints(q, vb, b);
		/* Copy relevant information provided by the userspace */
		memset(vbuf->planes, 0,
		       sizeof(vbuf->planes[0]) * vb->num_planes);
		ret = vb2_fill_vb2_v4l2_buffer(vb, b);
		if (ret)
			return ret;
	}

	if (is_prepare)
		return 0;

	if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		if (q->requires_requests) {
			dprintk(q, 1, "%s: queue requires requests\n", opname);
			return -EBADR;
		}
		if (q->uses_requests) {
			dprintk(q, 1, "%s: queue uses requests\n", opname);
			return -EBUSY;
		}
		return 0;
	} else if (!q->supports_requests) {
		dprintk(q, 1, "%s: queue does not support requests\n", opname);
		return -EBADR;
	} else if (q->uses_qbuf) {
		dprintk(q, 1, "%s: queue does not use requests\n", opname);
		return -EBUSY;
	}

	/*
	 * For proper locking when queueing a request you need to be able
	 * to lock access to the vb2 queue, so check that there is a lock
	 * that we can use. In addition p_req must be non-NULL.
	 */
	if (WARN_ON(!q->lock || !p_req))
		return -EINVAL;

	/*
	 * Make sure this op is implemented by the driver. It's easy to forget
	 * this callback, but it is important when canceling a buffer in a
	 * queued request.
	 */
	if (WARN_ON(!q->ops->buf_request_complete))
		return -EINVAL;
	/*
	 * Make sure this op is implemented by the driver for the output queue.
	 * It's easy to forget this callback, but it is important to correctly
	 * validate the 'field' value at QBUF time.
	 */
	if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
		     q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
		    !q->ops->buf_out_validate))
		return -EINVAL;

	if (b->request_fd < 0) {
		dprintk(q, 1, "%s: request_fd < 0\n", opname);
		return -EINVAL;
	}

	req = media_request_get_by_fd(mdev, b->request_fd);
	if (IS_ERR(req)) {
		dprintk(q, 1, "%s: invalid request_fd\n", opname);
		return PTR_ERR(req);
	}

	/*
	 * Early sanity check. This is checked again when the buffer
	 * is bound to the request in vb2_core_qbuf().
	 */
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_UPDATING) {
		dprintk(q, 1, "%s: request is not idle\n", opname);
		media_request_put(req);
		return -EBUSY;
	}

	*p_req = req;
	vbuf->request_fd = b->request_fd;

	return 0;
}

/*
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
{
	struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	/* Copy back data such as timestamp, flags, etc. */
	b->index = vb->index;
	b->type = vb->type;
	b->memory = vb->memory;
	b->bytesused = 0;

	b->flags = vbuf->flags;
	b->field = vbuf->field;
	v4l2_buffer_set_timestamp(b, vb->timestamp);
	b->timecode = vbuf->timecode;
	b->sequence = vbuf->sequence;
	b->reserved2 = 0;
	b->request_fd = 0;

	if (q->is_multiplanar) {
		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it. The caller has already verified memory and size.
		 */
		b->length = vb->num_planes;
		for (plane = 0; plane < vb->num_planes; ++plane) {
			struct v4l2_plane *pdst = &b->m.planes[plane];
			struct vb2_plane *psrc = &vb->planes[plane];

			pdst->bytesused = psrc->bytesused;
			pdst->length = psrc->length;
			if (q->memory == VB2_MEMORY_MMAP)
				pdst->m.mem_offset = psrc->m.offset;
			else if (q->memory == VB2_MEMORY_USERPTR)
				pdst->m.userptr = psrc->m.userptr;
			else if (q->memory == VB2_MEMORY_DMABUF)
				pdst->m.fd = psrc->m.fd;
			pdst->data_offset = psrc->data_offset;
			memset(pdst->reserved, 0, sizeof(pdst->reserved));
		}
	} else {
		/*
		 * We use length and offset in v4l2_planes array even for
		 * single-planar buffers, but userspace does not.
		 */
		b->length = vb->planes[0].length;
		b->bytesused = vb->planes[0].bytesused;
		if (q->memory == VB2_MEMORY_MMAP)
			b->m.offset = vb->planes[0].m.offset;
		else if (q->memory == VB2_MEMORY_USERPTR)
			b->m.userptr = vb->planes[0].m.userptr;
		else if (q->memory == VB2_MEMORY_DMABUF)
			b->m.fd = vb->planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
	if (!q->copy_timestamp) {
		/*
		 * For non-COPY timestamps, drop timestamp source bits
		 * and obtain the timestamp source from the queue.
		 */
		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_IN_REQUEST:
		b->flags |= V4L2_BUF_FLAG_IN_REQUEST;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		fallthrough;
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARING:
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	if ((vb->state == VB2_BUF_STATE_DEQUEUED ||
	     vb->state == VB2_BUF_STATE_IN_REQUEST) &&
	    vb->synced && vb->prepared)
		b->flags |= V4L2_BUF_FLAG_PREPARED;

	if (vb2_buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
	if (vbuf->request_fd >= 0) {
		b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
		b->request_fd = vbuf->request_fd;
	}
}

/*
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * v4l2_buffer by the userspace. It also verifies that struct
 * v4l2_buffer has a valid number of planes.
 */
static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	unsigned int plane;

	if (!vb->vb2_queue->copy_timestamp)
		vb->timestamp = 0;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) {
			planes[plane].m = vbuf->planes[plane].m;
			planes[plane].length = vbuf->planes[plane].length;
		}
		planes[plane].bytesused = vbuf->planes[plane].bytesused;
		planes[plane].data_offset = vbuf->planes[plane].data_offset;
	}
	return 0;
}

static const struct vb2_buf_ops v4l2_buf_ops = {
	.verify_planes_array = __verify_planes_array_core,
	.init_buffer = __init_vb2_v4l2_buffer,
	.fill_user_buffer = __fill_v4l2_buffer,
	.fill_vb2_buffer = __fill_vb2_buffer,
	.copy_timestamp = __copy_timestamp,
};

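/*
 * Return the index of the first buffer, starting at start_idx, whose copied
 * timestamp matches @timestamp, or -1 if no such buffer exists.
 */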
int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
		       unsigned int start_idx)
{
	unsigned int i;

	for (i = start_idx; i < q->num_buffers; i++)
		if (q->bufs[i]->copied_timestamp &&
		    q->bufs[i]->timestamp == timestamp)
			return i;
	return -1;
}
EXPORT_SYMBOL_GPL(vb2_find_timestamp);

/*
 * vb2_querybuf() - query video buffer information
 * @q:		videobuf queue
 * @b:		buffer struct passed from userspace to vidioc_querybuf handler
 *		in driver
 *
 * Should be called from vidioc_querybuf ioctl handler in driver.
 * This function will verify the passed v4l2_buffer structure and fill the
 * relevant information for the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_querybuf handler in driver.
 */
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "wrong buffer type\n");
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "buffer index out of range\n");
		return -EINVAL;
	}
	vb = q->bufs[b->index];
	ret = __verify_planes_array(vb, b);
	if (!ret)
		vb2_core_querybuf(q, b->index, b);
	return ret;
}
EXPORT_SYMBOL(vb2_querybuf);

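/* Report the V4L2_BUF_CAP_* capabilities supported by this queue. */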
static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
{
	*caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
	if (q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
	if (q->io_modes & VB2_USERPTR)
		*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
	if (q->io_modes & VB2_DMABUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
	if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
	if (q->allow_cache_hints && q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
	if (q->supports_requests)
		*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
#endif
}

int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	int ret = vb2_verify_memory_type(q, req->memory, req->type);

	fill_buf_caps(q, &req->capabilities);
	return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);

int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
		    struct v4l2_buffer *b)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
		return -EINVAL;

	ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);

	return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);

int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned requested_planes = 1;
	unsigned requested_sizes[VIDEO_MAX_PLANES];
	struct v4l2_format *f = &create->format;
	int ret = vb2_verify_memory_type(q, create->memory, f->type);
	unsigned i;

	fill_buf_caps(q, &create->capabilities);
	create->index = q->num_buffers;
	if (create->count == 0)
		return ret != -EBUSY ? ret : 0;

	switch (f->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		requested_planes = f->fmt.pix_mp.num_planes;
		if (requested_planes == 0 ||
		    requested_planes > VIDEO_MAX_PLANES)
			return -EINVAL;
		for (i = 0; i < requested_planes; i++)
			requested_sizes[i] =
				f->fmt.pix_mp.plane_fmt[i].sizeimage;
		break;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		requested_sizes[0] = f->fmt.pix.sizeimage;
		break;
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.vbi.samples_per_line *
			(f->fmt.vbi.count[0] + f->fmt.vbi.count[1]);
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.sliced.io_size;
		break;
	case V4L2_BUF_TYPE_SDR_CAPTURE:
	case V4L2_BUF_TYPE_SDR_OUTPUT:
		requested_sizes[0] = f->fmt.sdr.buffersize;
		break;
	case V4L2_BUF_TYPE_META_CAPTURE:
	case V4L2_BUF_TYPE_META_OUTPUT:
		requested_sizes[0] = f->fmt.meta.buffersize;
		break;
	default:
		return -EINVAL;
	}
	for (i = 0; i < requested_planes; i++)
		if (requested_sizes[i] == 0)
			return -EINVAL;
	return ret ? ret : vb2_core_create_bufs(q, create->memory,
						&create->count,
						requested_planes,
						requested_sizes);
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);

int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
	     struct v4l2_buffer *b)
{
	struct media_request *req = NULL;
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
	if (ret)
		return ret;
	ret = vb2_core_qbuf(q, b->index, b, req);
	if (req)
		media_request_put(req);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_qbuf);

int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(q, 1, "invalid buffer type\n");
		return -EINVAL;
	}

	ret = vb2_core_dqbuf(q, NULL, b, nonblocking);

	if (!q->is_output &&
	    b->flags & V4L2_BUF_FLAG_DONE &&
	    b->flags & V4L2_BUF_FLAG_LAST)
		q->last_buffer_dequeued = true;

	/*
	 * After calling the VIDIOC_DQBUF V4L2_BUF_FLAG_DONE must be
	 * cleared.
	 */
	b->flags &= ~V4L2_BUF_FLAG_DONE;

	return ret;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);

int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamon(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamon);

int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamoff(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamoff);

int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
	return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
			       eb->plane, eb->flags);
}
EXPORT_SYMBOL_GPL(vb2_expbuf);

int vb2_queue_init_name(struct vb2_queue *q, const char *name)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q) ||
	    WARN_ON(q->timestamp_flags &
		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
		return -EINVAL;

	/* Warn that the driver should choose an appropriate timestamp type */
	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);

	/* Warn that vb2_memory should match with v4l2_memory */
	if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
		|| WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
		|| WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
		return -EINVAL;

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);

	q->buf_ops = &v4l2_buf_ops;
	q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
	/*
	 * For compatibility with vb1: if QBUF hasn't been called yet, then
	 * return EPOLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 */
	q->quirk_poll_must_check_waiting_for_buffers = true;

	if (name)
		strscpy(q->name, name, sizeof(q->name));
	else
		q->name[0] = '\0';

	return vb2_core_queue_init(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_init_name);

int vb2_queue_init(struct vb2_queue *q)
{
	return vb2_queue_init_name(q, NULL);
}
EXPORT_SYMBOL_GPL(vb2_queue_init);

void vb2_queue_release(struct vb2_queue *q)
{
	vb2_core_queue_release(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);

__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t res;

	res = vb2_core_poll(q, file, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			res |= EPOLLPRI;
	}

	return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);

/*
 * The following functions are not part of the vb2 core API, but are helper
 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
 * and struct vb2_ops.
 * They contain boilerplate code that most if not all drivers have to do
 * and so they simplify the driver code.
 */

/* The queue is busy if there is an owner and you are not that owner. */
static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
{
	return vdev->queue->owner && vdev->queue->owner != file->private_data;
}

/* vb2 ioctl helpers */

int vb2_ioctl_reqbufs(struct file *file, void *priv,
		      struct v4l2_requestbuffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);

	fill_buf_caps(vdev->queue, &p->capabilities);
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
	/*
	 * If count == 0, then the owner has released all buffers and is
	 * no longer owner of the queue. Otherwise we have a new owner.
	 */
	if (res == 0)
		vdev->queue->owner = p->count ? file->private_data : NULL;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);

int vb2_ioctl_create_bufs(struct file *file, void *priv,
			  struct v4l2_create_buffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory,
					 p->format.type);

	p->index = vdev->queue->num_buffers;
	fill_buf_caps(vdev->queue, &p->capabilities);
	/*
	 * If count == 0, then just check if memory and type are valid.
	 * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
	 */
	if (p->count == 0)
		return res != -EBUSY ? res : 0;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;

	res = vb2_create_bufs(vdev->queue, p);
	if (res == 0)
		vdev->queue->owner = file->private_data;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);

int vb2_ioctl_prepare_buf(struct file *file, void *priv,
			  struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);

int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
	return vb2_querybuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);

int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);

int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);

int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamon(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);

int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamoff(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);

int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_expbuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);

/* v4l2_file_operations helpers */

int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_mmap(vdev->queue, vma);
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);

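/*
 * Release helper: if this file handle owns the queue, release all of its
 * buffers; the v4l2 file handle is released in any case. @lock may be NULL.
 */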
int _vb2_fop_release(struct file *file, struct mutex *lock)
{
	struct video_device *vdev = video_devdata(file);

	if (lock)
		mutex_lock(lock);
	if (file->private_data == vdev->queue->owner) {
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
	}
	if (lock)
		mutex_unlock(lock);
	return v4l2_fh_release(file);
}
EXPORT_SYMBOL_GPL(_vb2_fop_release);

int vb2_fop_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;

	return _vb2_fop_release(file, lock);
}
EXPORT_SYMBOL_GPL(vb2_fop_release);

vb2_fop_write(struct file * file,const char __user * buf,size_t count,loff_t * ppos)1140*4882a593Smuzhiyun ssize_t vb2_fop_write(struct file *file, const char __user *buf,
1141*4882a593Smuzhiyun size_t count, loff_t *ppos)
1142*4882a593Smuzhiyun {
1143*4882a593Smuzhiyun struct video_device *vdev = video_devdata(file);
1144*4882a593Smuzhiyun struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
1145*4882a593Smuzhiyun int err = -EBUSY;
1146*4882a593Smuzhiyun
1147*4882a593Smuzhiyun if (!(vdev->queue->io_modes & VB2_WRITE))
1148*4882a593Smuzhiyun return -EINVAL;
1149*4882a593Smuzhiyun if (lock && mutex_lock_interruptible(lock))
1150*4882a593Smuzhiyun return -ERESTARTSYS;
1151*4882a593Smuzhiyun if (vb2_queue_is_busy(vdev, file))
1152*4882a593Smuzhiyun goto exit;
1153*4882a593Smuzhiyun err = vb2_write(vdev->queue, buf, count, ppos,
1154*4882a593Smuzhiyun file->f_flags & O_NONBLOCK);
1155*4882a593Smuzhiyun if (vdev->queue->fileio)
1156*4882a593Smuzhiyun vdev->queue->owner = file->private_data;
1157*4882a593Smuzhiyun exit:
1158*4882a593Smuzhiyun if (lock)
1159*4882a593Smuzhiyun mutex_unlock(lock);
1160*4882a593Smuzhiyun return err;
1161*4882a593Smuzhiyun }
1162*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(vb2_fop_write);
1163*4882a593Smuzhiyun
vb2_fop_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)1164*4882a593Smuzhiyun ssize_t vb2_fop_read(struct file *file, char __user *buf,
1165*4882a593Smuzhiyun size_t count, loff_t *ppos)
1166*4882a593Smuzhiyun {
1167*4882a593Smuzhiyun struct video_device *vdev = video_devdata(file);
1168*4882a593Smuzhiyun struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
1169*4882a593Smuzhiyun int err = -EBUSY;
1170*4882a593Smuzhiyun
1171*4882a593Smuzhiyun if (!(vdev->queue->io_modes & VB2_READ))
1172*4882a593Smuzhiyun return -EINVAL;
1173*4882a593Smuzhiyun if (lock && mutex_lock_interruptible(lock))
1174*4882a593Smuzhiyun return -ERESTARTSYS;
1175*4882a593Smuzhiyun if (vb2_queue_is_busy(vdev, file))
1176*4882a593Smuzhiyun goto exit;
1177*4882a593Smuzhiyun err = vb2_read(vdev->queue, buf, count, ppos,
1178*4882a593Smuzhiyun file->f_flags & O_NONBLOCK);
1179*4882a593Smuzhiyun if (vdev->queue->fileio)
1180*4882a593Smuzhiyun vdev->queue->owner = file->private_data;
1181*4882a593Smuzhiyun exit:
1182*4882a593Smuzhiyun if (lock)
1183*4882a593Smuzhiyun mutex_unlock(lock);
1184*4882a593Smuzhiyun return err;
1185*4882a593Smuzhiyun }
1186*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(vb2_fop_read);
1187*4882a593Smuzhiyun
vb2_fop_poll(struct file * file,poll_table * wait)1188*4882a593Smuzhiyun __poll_t vb2_fop_poll(struct file *file, poll_table *wait)
1189*4882a593Smuzhiyun {
1190*4882a593Smuzhiyun struct video_device *vdev = video_devdata(file);
1191*4882a593Smuzhiyun struct vb2_queue *q = vdev->queue;
1192*4882a593Smuzhiyun struct mutex *lock = q->lock ? q->lock : vdev->lock;
1193*4882a593Smuzhiyun __poll_t res;
1194*4882a593Smuzhiyun void *fileio;
1195*4882a593Smuzhiyun
1196*4882a593Smuzhiyun /*
1197*4882a593Smuzhiyun * If this helper doesn't know how to lock, then you shouldn't be using
1198*4882a593Smuzhiyun * it but you should write your own.
1199*4882a593Smuzhiyun */
1200*4882a593Smuzhiyun WARN_ON(!lock);
1201*4882a593Smuzhiyun
1202*4882a593Smuzhiyun if (lock && mutex_lock_interruptible(lock))
1203*4882a593Smuzhiyun return EPOLLERR;
1204*4882a593Smuzhiyun
1205*4882a593Smuzhiyun fileio = q->fileio;
1206*4882a593Smuzhiyun
1207*4882a593Smuzhiyun res = vb2_poll(vdev->queue, file, wait);
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun /* If fileio was started, then we have a new queue owner. */
1210*4882a593Smuzhiyun if (!fileio && q->fileio)
1211*4882a593Smuzhiyun q->owner = file->private_data;
1212*4882a593Smuzhiyun if (lock)
1213*4882a593Smuzhiyun mutex_unlock(lock);
1214*4882a593Smuzhiyun return res;
1215*4882a593Smuzhiyun }
1216*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(vb2_fop_poll);
1217*4882a593Smuzhiyun
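/*
 * Illustrative sketch: the vb2_fop_* helpers above are meant to be plugged
 * directly into a driver's v4l2_file_operations, with locking handled via
 * the queue's lock (or, as a fallback, the video_device's lock). "foo_fops"
 * is a hypothetical placeholder name.
 *
 *	static const struct v4l2_file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= v4l2_fh_open,
 *		.release	= vb2_fop_release,
 *		.read		= vb2_fop_read,
 *		.poll		= vb2_fop_poll,
 *		.mmap		= vb2_fop_mmap,
 *		.unlocked_ioctl	= video_ioctl2,
 *	};
 */
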
#ifndef CONFIG_MMU
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif

void vb2_video_unregister_device(struct video_device *vdev)
{
	/* Check if vdev was ever registered at all */
	if (!vdev || !video_is_registered(vdev))
		return;

	/*
	 * Calling this function only makes sense if vdev->queue is set.
	 * If it is NULL, then just call video_unregister_device() instead.
	 */
	WARN_ON(!vdev->queue);

	/*
	 * Take a reference to the device since video_unregister_device()
	 * calls device_unregister(), but we don't want that to release
	 * the device since we want to clean up the queue first.
	 */
	get_device(&vdev->dev);
	video_unregister_device(vdev);
	if (vdev->queue && vdev->queue->owner) {
		struct mutex *lock = vdev->queue->lock ?
			vdev->queue->lock : vdev->lock;

		if (lock)
			mutex_lock(lock);
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
		if (lock)
			mutex_unlock(lock);
	}
	/*
	 * Now we put the device, and in most cases this will release
	 * everything.
	 */
	put_device(&vdev->dev);
}
EXPORT_SYMBOL_GPL(vb2_video_unregister_device);

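/*
 * Illustrative sketch: in a driver's remove/disconnect path this helper is
 * used as a drop-in replacement for video_unregister_device() whenever
 * vdev->queue is set, so that a queue still owned by an open filehandle is
 * released before the last reference to the device is dropped. "foo" is a
 * hypothetical driver context:
 *
 *	vb2_video_unregister_device(&foo->vdev);
 */
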
/* vb2_ops helpers. Only use if vq->lock is non-NULL. */

void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);

void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);

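/*
 * Illustrative sketch: these two helpers are intended to be used as the
 * wait_prepare/wait_finish callbacks in a driver's vb2_ops when the queue's
 * lock field is set, so that vq->lock is dropped while videobuf2 sleeps
 * waiting for a buffer. "foo_qops" and the foo_* callbacks are hypothetical
 * placeholders.
 *
 *	static const struct vb2_ops foo_qops = {
 *		.queue_setup		= foo_queue_setup,
 *		.buf_queue		= foo_buf_queue,
 *		.start_streaming	= foo_start_streaming,
 *		.stop_streaming		= foo_stop_streaming,
 *		.wait_prepare		= vb2_ops_wait_prepare,
 *		.wait_finish		= vb2_ops_wait_finish,
 *	};
 */
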
/*
 * Note that this function is called during validation time and
 * thus the req_queue_mutex is held to ensure no request objects
 * can be added or deleted while validating. So there is no need
 * to protect the objects list.
 */
int vb2_request_validate(struct media_request *req)
{
	struct media_request_object *obj;
	int ret = 0;

	if (!vb2_request_buffer_cnt(req))
		return -ENOENT;

	list_for_each_entry(obj, &req->objects, list) {
		if (!obj->ops->prepare)
			continue;

		ret = obj->ops->prepare(obj);
		if (ret)
			break;
	}

	if (ret) {
		list_for_each_entry_continue_reverse(obj, &req->objects, list)
			if (obj->ops->unprepare)
				obj->ops->unprepare(obj);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_request_validate);

void vb2_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
		if (obj->ops->queue)
			obj->ops->queue(obj);
}
EXPORT_SYMBOL_GPL(vb2_request_queue);

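/*
 * Illustrative sketch: drivers that support the request API typically hook
 * these two helpers into their media_device_ops so that requests containing
 * vb2 buffer objects are validated and queued by videobuf2. "foo_media_ops"
 * is a hypothetical placeholder; at probe time the driver would set
 * mdev->ops = &foo_media_ops.
 *
 *	static const struct media_device_ops foo_media_ops = {
 *		.req_validate	= vb2_request_validate,
 *		.req_queue	= vb2_request_queue,
 *	};
 */
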
MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");