// SPDX-License-Identifier: GPL-2.0-only
/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}

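/*
 * Worked example: sev_pos() maps a logical offset into the circular
 * per-subscription event buffer. With sev->first == 2 and
 * sev->elems == 3, sev_pos(sev, 0) == 2, sev_pos(sev, 1) == 0 and
 * sev_pos(sev, 2) == 1, so offset 0 always names the oldest event.
 */
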
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	struct timespec64 ts;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	ts = ns_to_timespec64(kev->ts);
	event->timestamp.tv_sec = ts.tv_sec;
	event->timestamp.tv_nsec = ts.tv_nsec;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

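/*
 * Note on the timestamp conversion above: kev->ts is a monotonic
 * timestamp in nanoseconds (taken with ktime_get_ns() at queue time),
 * while the v4l2_event handed to userspace carries a split timestamp.
 * For example, a kev->ts of 1500000123 ns becomes tv_sec == 1 and
 * tv_nsec == 500000123.
 */
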
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

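/*
 * Usage sketch (illustrative, not part of this file): the V4L2 ioctl
 * core implements VIDIOC_DQEVENT on top of this helper, deriving the
 * nonblocking argument from the file's O_NONBLOCK flag, roughly:
 *
 *	err = v4l2_event_dequeue(fh, &ev, file->f_flags & O_NONBLOCK);
 */
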
/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

static void __v4l2_event_queue_fh(struct v4l2_fh *fh,
				  const struct v4l2_event *ev, u64 ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->ts = ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}

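/*
 * Overflow behaviour illustrated: with elems == 3 and three events
 * already pending, queueing a fourth drops the oldest one. If the
 * subscription provides a merge op, the dropped event's payload is
 * first folded into the second-oldest event (for source-change events
 * the 'changes' bitmasks are ORed together, see further below), so a
 * well-written op can preserve the information even though the event
 * itself is discarded.
 */
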
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	u64 ts;

	if (vdev == NULL)
		return;

	ts = ktime_get_ns();

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, ts);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

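/*
 * Usage sketch (illustrative, not part of this file): a driver that
 * reached the end of the stream could notify every open file handle
 * on the device with something like:
 *
 *	static const struct v4l2_event ev = {
 *		.type = V4L2_EVENT_EOS,
 *	};
 *	v4l2_event_queue(vdev, &ev);
 */
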
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	u64 ts = ktime_get_ns();

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, ts);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

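/*
 * Usage sketch (illustrative, not part of this file): a driver's poll
 * handler typically reports pending events via EPOLLPRI, e.g.:
 *
 *	if (v4l2_event_pending(&ctx->fh))
 *		rc |= EPOLLPRI;
 *
 * where ctx is a hypothetical per-file context embedding a v4l2_fh.
 */
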
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
	struct v4l2_fh *fh = sev->fh;
	unsigned int i;

	lockdep_assert_held(&fh->subscribe_lock);
	assert_spin_locked(&fh->vdev->fh_lock);

	/* Remove any pending events for this subscription */
	for (i = 0; i < sev->in_use; i++) {
		list_del(&sev->events[sev_pos(sev, i)].list);
		fh->navailable--;
	}
	list_del(&sev->list);
}

int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;
	int ret = 0;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;
	sev->elems = elems;

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		/* Already listening */
		kvfree(sev);
	} else if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);
		if (ret) {
			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
			__v4l2_event_unsubscribe(sev);
			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
			kvfree(sev);
		}
	}

	mutex_unlock(&fh->subscribe_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

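/*
 * Usage sketch (illustrative, not part of this file): a driver's
 * .subscribe_event ioctl op usually dispatches on sub->type, e.g.:
 *
 *	switch (sub->type) {
 *	case V4L2_EVENT_EOS:
 *		return v4l2_event_subscribe(fh, sub, 2, NULL);
 *	case V4L2_EVENT_CTRL:
 *		return v4l2_ctrl_subscribe_event(fh, sub);
 *	default:
 *		return -EINVAL;
 *	}
 *
 * The elems value (2 here) just sizes the per-subscription ring buffer.
 */
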
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		__v4l2_event_unsubscribe(sev);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	mutex_unlock(&fh->subscribe_lock);

	kvfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

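/*
 * Note on the semantics above: passing sub->type == V4L2_EVENT_ALL
 * drops every subscription on the file handle at once via
 * v4l2_event_unsubscribe_all(), and unsubscribing a type that was
 * never subscribed is not an error; both paths return 0.
 */
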
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

static void v4l2_event_src_replace(struct v4l2_event *old,
				const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

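/*
 * Example of the replace/merge semantics: if a pending source-change
 * event already has changes == V4L2_EVENT_SRC_CH_RESOLUTION and a new
 * one arrives, the ops above OR the bitmasks, so the single surviving
 * event still reports every change since the last dequeue.
 */
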
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

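/*
 * Usage sketch (illustrative, not part of this file): drivers commonly
 * chain this helper from their .subscribe_event op:
 *
 *	case V4L2_EVENT_SOURCE_CHANGE:
 *		return v4l2_src_change_event_subscribe(fh, sub);
 *
 * Note that elems is passed as 0 above and clamped to 1 by
 * v4l2_event_subscribe(), so source-change events coalesce through
 * the replace op rather than queueing up.
 */
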
int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);
357