xref: /OK3568_Linux_fs/kernel/drivers/media/v4l2-core/v4l2-async.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
					  struct v4l2_subdev *subdev,
					  struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}

static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
					    struct v4l2_subdev *subdev,
					    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}

static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}

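/*
 * The three helpers above dispatch to the optional callbacks a bridge
 * driver supplies through struct v4l2_async_notifier_operations; a missing
 * callback is treated as success (or as a no-op for unbind). A minimal
 * sketch, assuming placeholder my_notifier_* callbacks that are not part
 * of this file:
 *
 *	static const struct v4l2_async_notifier_operations my_notifier_ops = {
 *		.bound = my_notifier_bound,
 *		.unbind = my_notifier_unbind,
 *		.complete = my_notifier_complete,
 *	};
 */
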
static bool match_i2c(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

static bool match_devname(struct v4l2_async_notifier *notifier,
			  struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	return !strcmp(asd->match.device_name, dev_name(sd->dev));
}

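/*
 * Illustrative note, not taken from the original sources: match_fwnode()
 * below also accepts a "heterogeneous" match in which one side provides an
 * endpoint fwnode and the other the device fwnode. With a devicetree
 * fragment such as
 *
 *	sensor@36 {
 *		port {
 *			sensor_out: endpoint {
 *				remote-endpoint = <&mipi_in>;
 *			};
 *		};
 *	};
 *
 * an asd created from the "sensor_out" endpoint matches a sub-device whose
 * sd->fwnode is the "sensor@36" device node, because the endpoint's port
 * parent is compared against the device fwnode. The messages printed at the
 * end of match_fwnode() then ask the driver matching on the device fwnode
 * to be converted to endpoint matching.
 */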
static bool match_fwnode(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *other_fwnode;
	struct fwnode_handle *dev_fwnode;
	bool asd_fwnode_is_ep;
	bool sd_fwnode_is_ep;
	struct device *dev;

	/*
	 * Both the subdev and the async subdev can provide either an endpoint
	 * fwnode or a device fwnode. Start with the simple case of direct
	 * fwnode matching.
	 */
	if (sd->fwnode == asd->match.fwnode)
		return true;

	/*
	 * Otherwise, check if the sd fwnode and the asd fwnode refer to an
	 * endpoint or a device. If they're of the same type, there's no match.
	 * Technically speaking this checks if the nodes refer to a connected
	 * endpoint, which is the simplest check that works for both OF and
	 * ACPI. This won't make a difference, as drivers should not try to
	 * match unconnected endpoints.
	 */
	sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd->fwnode);
	asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);

	if (sd_fwnode_is_ep == asd_fwnode_is_ep)
		return false;

	/*
	 * The sd and asd fwnodes are of different types. Get the device fwnode
	 * parent of the endpoint fwnode, and compare it with the other fwnode.
	 */
	if (sd_fwnode_is_ep) {
		dev_fwnode = fwnode_graph_get_port_parent(sd->fwnode);
		other_fwnode = asd->match.fwnode;
	} else {
		dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
		other_fwnode = sd->fwnode;
	}

	fwnode_handle_put(dev_fwnode);

	if (dev_fwnode != other_fwnode)
		return false;

	/*
	 * We have a heterogeneous match. Retrieve the struct device of the side
	 * that matched on a device fwnode to print its driver name.
	 */
	if (sd_fwnode_is_ep)
		dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev
		    : notifier->sd->dev;
	else
		dev = sd->dev;

	if (dev && dev->driver) {
		if (sd_fwnode_is_ep)
			dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n",
				 dev->driver->name);
		dev_notice(dev, "Consider updating driver %s to match on endpoints\n",
			   dev->driver->name);
	}

	return true;
}

static bool match_custom(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (!asd->match.custom.match)
		/* Match always */
		return true;

	return asd->match.custom.match(sd->dev, asd);
}

static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

static struct v4l2_async_subdev *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* bus_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
			match = match_custom;
			break;
		case V4L2_ASYNC_MATCH_DEVNAME:
			match = match_devname;
			break;
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(notifier, sd, asd))
			return asd;
	}

	return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_DEVNAME:
		return strcmp(asd_x->match.device_name,
			      asd_y->match.device_name) == 0;
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_notifier_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_notifier_can_complete(notifier))
		return 0;

	return v4l2_async_notifier_call_complete(notifier);
}

static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier);

static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_notifier_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/*
	 * Subdevice driver will reprobe and put the subdev back
	 * onto the list
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
		v4l2_async_cleanup(sd);

		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				       struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool
v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				     struct v4l2_async_subdev *asd,
				     int this_index)
{
	struct v4l2_async_subdev *asd_y;
	int j = 0;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
		if (this_index >= 0 && j++ >= this_index)
			break;
		if (asd_equal(asd, asd_y))
			return true;
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_notifier_has_async_subdev(notifier, asd))
			return true;

	return false;
}

static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
					 struct v4l2_async_subdev *asd,
					 int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_CUSTOM:
	case V4L2_ASYNC_MATCH_DEVNAME:
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_notifier_has_async_subdev(notifier, asd,
							 this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}

void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
{
	INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_notifier_init);

static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret, i = 0;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	list_for_each_entry(asd, &notifier->asd_list, asd_list) {
		ret = v4l2_async_notifier_asd_valid(notifier, asd, i++);
		if (ret)
			goto err_unlock;

		list_add_tail(&asd->list, &notifier->waiting);
	}

	ret = v4l2_async_notifier_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_notifier_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_notifier_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

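/*
 * Typical use from a bridge driver, shown only as an illustrative sketch;
 * priv, ep and my_notifier_ops stand for the caller's own objects and are
 * not defined in this file:
 *
 *	v4l2_async_notifier_init(&priv->notifier);
 *
 *	asd = v4l2_async_notifier_add_fwnode_remote_subdev(&priv->notifier,
 *							   ep, sizeof(*asd));
 *	if (IS_ERR(asd))
 *		return PTR_ERR(asd);
 *
 *	priv->notifier.ops = &my_notifier_ops;
 *
 *	ret = v4l2_async_notifier_register(&priv->v4l2_dev, &priv->notifier);
 *	if (ret)
 *		v4l2_async_notifier_cleanup(&priv->notifier);
 *
 * On removal the driver undoes this with v4l2_async_notifier_unregister()
 * followed by v4l2_async_notifier_cleanup().
 */
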
#if IS_ENABLED(CONFIG_NO_GKI)
static int __v4l2_async_notifier_clr_unready_dev(
	struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	int clr_num = 0;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			clr_num += __v4l2_async_notifier_clr_unready_dev(
					subdev_notifier);
	}

	list_for_each_entry_safe(sd, tmp, &notifier->waiting, async_list) {
		list_del_init(&sd->async_list);
		sd->asd = NULL;
		sd->dev = NULL;
		clr_num++;
	}

	return clr_num;
}

int v4l2_async_notifier_clr_unready_dev(struct v4l2_async_notifier *notifier)
{
	int ret = 0;
	int clr_num = 0;

	mutex_lock(&list_lock);

	while (notifier->parent)
		notifier = notifier->parent;

	if (!notifier->v4l2_dev)
		goto out;

	clr_num = __v4l2_async_notifier_clr_unready_dev(notifier);
	dev_info(notifier->v4l2_dev->dev,
		 "clear unready subdev num: %d\n", clr_num);

	if (clr_num > 0)
		ret = v4l2_async_notifier_try_complete(notifier);

out:
	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_clr_unready_dev);
#endif

int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
					struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);

static void
__v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_notifier_unbind_all_subdevs(notifier);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;

	if (!notifier || !notifier->asd_list.next)
		return;

	list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			fwnode_handle_put(asd->match.fwnode);
			break;
		default:
			break;
		}

		list_del(&asd->asd_list);
		kfree(asd);
	}
}

void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);

int v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
				   struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	ret = v4l2_async_notifier_asd_valid(notifier, asd, -1);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_subdev);

struct v4l2_async_subdev *
v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
				      struct fwnode_handle *fwnode,
				      unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode_handle_get(fwnode);

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		fwnode_handle_put(fwnode);
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_subdev);

struct v4l2_async_subdev *
v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
					     struct fwnode_handle *endpoint,
					     unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	struct fwnode_handle *remote;

	remote = fwnode_graph_get_remote_port_parent(endpoint);
	if (!remote)
		return ERR_PTR(-ENOTCONN);

	asd = v4l2_async_notifier_add_fwnode_subdev(notif, remote,
						    asd_struct_size);
	/*
	 * Calling v4l2_async_notifier_add_fwnode_subdev grabs a refcount,
	 * so drop the one we got in fwnode_graph_get_remote_port_parent.
	 */
	fwnode_handle_put(remote);
	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_remote_subdev);

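/*
 * An illustrative sketch of how a caller might walk its firmware graph and
 * add the remote sub-device of every connected endpoint; dev and notifier
 * are the caller's own objects and error handling is kept minimal:
 *
 *	struct fwnode_handle *ep = NULL;
 *
 *	while ((ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), ep))) {
 *		struct v4l2_async_subdev *asd;
 *
 *		asd = v4l2_async_notifier_add_fwnode_remote_subdev(notifier,
 *								   ep, sizeof(*asd));
 *		if (IS_ERR(asd)) {
 *			fwnode_handle_put(ep);
 *			return PTR_ERR(asd);
 *		}
 *	}
 */
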
struct v4l2_async_subdev *
v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
				   int adapter_id, unsigned short address,
				   unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_i2c_subdev);

struct v4l2_async_subdev *
v4l2_async_notifier_add_devname_subdev(struct v4l2_async_notifier *notifier,
				       const char *device_name,
				       unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_DEVNAME;
	asd->match.device_name = device_name;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_devname_subdev);

int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and async sub-device does not
	 * exist independently of the device at any point of time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_notifier_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_notifier_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Complete failed. Unbind the sub-devices bound through registering
	 * this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

	if (sd->asd)
		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

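/*
 * v4l2_async_register_subdev() above is typically the last step of a
 * sub-device (e.g. sensor) driver's probe. Illustrative sketch only;
 * sensor, client and sensor_subdev_ops are the caller's own symbols:
 *
 *	v4l2_i2c_subdev_init(&sensor->sd, client, &sensor_subdev_ops);
 *	...
 *	ret = v4l2_async_register_subdev(&sensor->sd);
 *	if (ret)
 *		goto err_cleanup;
 */
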
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(sd->subdev_notifier);
	__v4l2_async_notifier_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);
894