xref: /OK3568_Linux_fs/kernel/drivers/base/attribute_container.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * attribute_container.c - implementation of a simple container for classes
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * The basic idea here is to enable a device to be attached to an
 8*4882a593Smuzhiyun  * arbitrary number of classes without having to allocate storage for them.
9*4882a593Smuzhiyun  * Instead, the contained classes select the devices they need to attach
10*4882a593Smuzhiyun  * to via a matching function.
11*4882a593Smuzhiyun  */
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <linux/attribute_container.h>
14*4882a593Smuzhiyun #include <linux/device.h>
15*4882a593Smuzhiyun #include <linux/kernel.h>
16*4882a593Smuzhiyun #include <linux/slab.h>
17*4882a593Smuzhiyun #include <linux/list.h>
18*4882a593Smuzhiyun #include <linux/module.h>
19*4882a593Smuzhiyun #include <linux/mutex.h>
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #include "base.h"
22*4882a593Smuzhiyun 
/* This is a private structure used to tie the classdev and the
 * container .. it should never be visible outside this file */
struct internal_container {
	struct klist_node node;			/* entry in cont->containers */
	struct attribute_container *cont;	/* container this pairing belongs to */
	struct device classdev;			/* class device exposed for the paired device */
};
30*4882a593Smuzhiyun 
internal_container_klist_get(struct klist_node * n)31*4882a593Smuzhiyun static void internal_container_klist_get(struct klist_node *n)
32*4882a593Smuzhiyun {
33*4882a593Smuzhiyun 	struct internal_container *ic =
34*4882a593Smuzhiyun 		container_of(n, struct internal_container, node);
35*4882a593Smuzhiyun 	get_device(&ic->classdev);
36*4882a593Smuzhiyun }
37*4882a593Smuzhiyun 
internal_container_klist_put(struct klist_node * n)38*4882a593Smuzhiyun static void internal_container_klist_put(struct klist_node *n)
39*4882a593Smuzhiyun {
40*4882a593Smuzhiyun 	struct internal_container *ic =
41*4882a593Smuzhiyun 		container_of(n, struct internal_container, node);
42*4882a593Smuzhiyun 	put_device(&ic->classdev);
43*4882a593Smuzhiyun }
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun /**
47*4882a593Smuzhiyun  * attribute_container_classdev_to_container - given a classdev, return the container
48*4882a593Smuzhiyun  *
49*4882a593Smuzhiyun  * @classdev: the class device created by attribute_container_add_device.
50*4882a593Smuzhiyun  *
51*4882a593Smuzhiyun  * Returns the container associated with this classdev.
52*4882a593Smuzhiyun  */
53*4882a593Smuzhiyun struct attribute_container *
attribute_container_classdev_to_container(struct device * classdev)54*4882a593Smuzhiyun attribute_container_classdev_to_container(struct device *classdev)
55*4882a593Smuzhiyun {
56*4882a593Smuzhiyun 	struct internal_container *ic =
57*4882a593Smuzhiyun 		container_of(classdev, struct internal_container, classdev);
58*4882a593Smuzhiyun 	return ic->cont;
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun static LIST_HEAD(attribute_container_list);
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun static DEFINE_MUTEX(attribute_container_mutex);
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun /**
67*4882a593Smuzhiyun  * attribute_container_register - register an attribute container
68*4882a593Smuzhiyun  *
69*4882a593Smuzhiyun  * @cont: The container to register.  This must be allocated by the
70*4882a593Smuzhiyun  *        callee and should also be zeroed by it.
71*4882a593Smuzhiyun  */
72*4882a593Smuzhiyun int
attribute_container_register(struct attribute_container * cont)73*4882a593Smuzhiyun attribute_container_register(struct attribute_container *cont)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun 	INIT_LIST_HEAD(&cont->node);
76*4882a593Smuzhiyun 	klist_init(&cont->containers, internal_container_klist_get,
77*4882a593Smuzhiyun 		   internal_container_klist_put);
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun 	mutex_lock(&attribute_container_mutex);
80*4882a593Smuzhiyun 	list_add_tail(&cont->node, &attribute_container_list);
81*4882a593Smuzhiyun 	mutex_unlock(&attribute_container_mutex);
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	return 0;
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(attribute_container_register);
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun /**
88*4882a593Smuzhiyun  * attribute_container_unregister - remove a container registration
89*4882a593Smuzhiyun  *
90*4882a593Smuzhiyun  * @cont: previously registered container to remove
91*4882a593Smuzhiyun  */
92*4882a593Smuzhiyun int
attribute_container_unregister(struct attribute_container * cont)93*4882a593Smuzhiyun attribute_container_unregister(struct attribute_container *cont)
94*4882a593Smuzhiyun {
95*4882a593Smuzhiyun 	int retval = -EBUSY;
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun 	mutex_lock(&attribute_container_mutex);
98*4882a593Smuzhiyun 	spin_lock(&cont->containers.k_lock);
99*4882a593Smuzhiyun 	if (!list_empty(&cont->containers.k_list))
100*4882a593Smuzhiyun 		goto out;
101*4882a593Smuzhiyun 	retval = 0;
102*4882a593Smuzhiyun 	list_del(&cont->node);
103*4882a593Smuzhiyun  out:
104*4882a593Smuzhiyun 	spin_unlock(&cont->containers.k_lock);
105*4882a593Smuzhiyun 	mutex_unlock(&attribute_container_mutex);
106*4882a593Smuzhiyun 	return retval;
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun }
109*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(attribute_container_unregister);
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun /* private function used as class release */
attribute_container_release(struct device * classdev)112*4882a593Smuzhiyun static void attribute_container_release(struct device *classdev)
113*4882a593Smuzhiyun {
114*4882a593Smuzhiyun 	struct internal_container *ic
115*4882a593Smuzhiyun 		= container_of(classdev, struct internal_container, classdev);
116*4882a593Smuzhiyun 	struct device *dev = classdev->parent;
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun 	kfree(ic);
119*4882a593Smuzhiyun 	put_device(dev);
120*4882a593Smuzhiyun }
121*4882a593Smuzhiyun 
/**
 * attribute_container_add_device - see if any container is interested in dev
 *
 * @dev: device to add attributes to
 * @fn:	 function to trigger addition of class device.
 *
 * This function allocates storage for the class device(s) to be
 * attached to dev (one for each matching attribute_container).  If no
 * fn is provided, the code will simply register the class device via
 * device_add.  If a function is provided, it is expected to add
 * the class device at the appropriate time.  One of the things that
 * might be necessary is to allocate and initialise the classdev and
 * then add it a later time.  To do this, call this routine for
 * allocation and initialisation and then use
 * attribute_container_device_trigger() to call device_add() on
 * it.  Note: after this, the class device contains a reference to dev
 * which is not relinquished until the release of the classdev.
 */
void
attribute_container_add_device(struct device *dev,
			       int (*fn)(struct attribute_container *,
					 struct device *,
					 struct device *))
{
	struct attribute_container *cont;

	mutex_lock(&attribute_container_mutex);
	list_for_each_entry(cont, &attribute_container_list, node) {
		struct internal_container *ic;

		/* containers with no classdevs never get per-device state */
		if (attribute_container_no_classdevs(cont))
			continue;

		if (!cont->match(cont, dev))
			continue;

		ic = kzalloc(sizeof(*ic), GFP_KERNEL);
		if (!ic) {
			/* best effort: skip this container, keep scanning */
			dev_err(dev, "failed to allocate class container\n");
			continue;
		}

		ic->cont = cont;
		device_initialize(&ic->classdev);
		/* reference on dev dropped in attribute_container_release() */
		ic->classdev.parent = get_device(dev);
		ic->classdev.class = cont->class;
		cont->class->dev_release = attribute_container_release;
		dev_set_name(&ic->classdev, "%s", dev_name(dev));
		if (fn)
			fn(cont, dev, &ic->classdev);
		else
			attribute_container_add_class_device(&ic->classdev);
		/* link last: klist get hook pins the now-initialized classdev */
		klist_add_tail(&ic->node, &cont->containers);
	}
	mutex_unlock(&attribute_container_mutex);
}
178*4882a593Smuzhiyun 
/* Iterate @pos over the internal_containers on klist @head via @iter.
 * The iterator is exited automatically when the list is exhausted
 * (the statement expression returns NULL after klist_iter_exit).
 * FIXME: can't break out of this unless klist_iter_exit is also
 * called before doing the break
 */
#define klist_for_each_entry(pos, head, member, iter) \
	for (klist_iter_init(head, iter); (pos = ({ \
		struct klist_node *n = klist_next(iter); \
		n ? container_of(n, typeof(*pos), member) : \
			({ klist_iter_exit(iter) ; NULL; }); \
	})) != NULL;)
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun 
/**
 * attribute_container_remove_device - make device eligible for removal.
 *
 * @dev:  The generic device
 * @fn:	  A function to call to remove the device
 *
 * This routine triggers device removal.  If fn is NULL, then it is
 * simply done via device_unregister (note that if something
 * still has a reference to the classdev, then the memory occupied
 * will not be freed until the classdev is released).  If you want a
 * two phase release: remove from visibility and then delete the
 * device, then you should use this routine with a fn that calls
 * device_del() and then use attribute_container_device_trigger()
 * to do the final put on the classdev.
 */
void
attribute_container_remove_device(struct device *dev,
				  void (*fn)(struct attribute_container *,
					     struct device *,
					     struct device *))
{
	struct attribute_container *cont;

	mutex_lock(&attribute_container_mutex);
	list_for_each_entry(cont, &attribute_container_list, node) {
		struct internal_container *ic;
		struct klist_iter iter;

		/* such containers never had classdevs allocated for dev */
		if (attribute_container_no_classdevs(cont))
			continue;

		if (!cont->match(cont, dev))
			continue;

		klist_for_each_entry(ic, &cont->containers, node, &iter) {
			if (dev != ic->classdev.parent)
				continue;
			/* unlink first; the klist put hook drops the classdev ref */
			klist_del(&ic->node);
			if (fn)
				fn(cont, dev, &ic->classdev);
			else {
				attribute_container_remove_attrs(&ic->classdev);
				device_unregister(&ic->classdev);
			}
		}
	}
	mutex_unlock(&attribute_container_mutex);
}
238*4882a593Smuzhiyun 
/* Run @fn on every classdev of @cont whose parent is @dev, stopping at
 * the first failure.  On failure, @undo (if non-NULL) is replayed over
 * the classdevs already processed, in list order, up to but excluding
 * the failed entry.  Returns 0 or the first error from @fn.
 */
static int
do_attribute_container_device_trigger_safe(struct device *dev,
					   struct attribute_container *cont,
					   int (*fn)(struct attribute_container *,
						     struct device *, struct device *),
					   int (*undo)(struct attribute_container *,
						       struct device *, struct device *))
{
	int ret;
	struct internal_container *ic, *failed;
	struct klist_iter iter;

	/* no classdevs: the trigger runs once with a NULL classdev */
	if (attribute_container_no_classdevs(cont))
		return fn(cont, dev, NULL);

	klist_for_each_entry(ic, &cont->containers, node, &iter) {
		if (dev == ic->classdev.parent) {
			ret = fn(cont, dev, &ic->classdev);
			if (ret) {
				failed = ic;
				/* must exit the iterator before leaving the loop */
				klist_iter_exit(&iter);
				goto fail;
			}
		}
	}
	return 0;

fail:
	if (!undo)
		return ret;

	/* Attempt to undo the work partially done. */
	klist_for_each_entry(ic, &cont->containers, node, &iter) {
		if (ic == failed) {
			klist_iter_exit(&iter);
			break;
		}
		if (dev == ic->classdev.parent)
			undo(cont, dev, &ic->classdev);
	}
	return ret;
}
281*4882a593Smuzhiyun 
/**
 * attribute_container_device_trigger_safe - execute a trigger for each
 * matching classdev or fail all of them.
 *
 * @dev:  The generic device to run the trigger for
 * @fn:	  the function to execute for each classdev.
 * @undo: A function to undo the work previously done in case of error
 *
 * This function is a safe version of
 * attribute_container_device_trigger. It stops on the first error and
 * undoes the partial work that has been done, on previous classdevs.  It
 * is guaranteed that either they all succeeded, or none of them
 * succeeded.
 */
int
attribute_container_device_trigger_safe(struct device *dev,
					int (*fn)(struct attribute_container *,
						  struct device *,
						  struct device *),
					int (*undo)(struct attribute_container *,
						    struct device *,
						    struct device *))
{
	struct attribute_container *cont, *failed = NULL;
	int ret = 0;

	mutex_lock(&attribute_container_mutex);

	list_for_each_entry(cont, &attribute_container_list, node) {

		if (!cont->match(cont, dev))
			continue;

		ret = do_attribute_container_device_trigger_safe(dev, cont,
								 fn, undo);
		if (ret) {
			failed = cont;
			break;
		}
	}

	/* roll back every fully-processed container before the failed one */
	if (ret && !WARN_ON(!undo)) {
		list_for_each_entry(cont, &attribute_container_list, node) {

			if (failed == cont)
				break;

			if (!cont->match(cont, dev))
				continue;

			/* undo runs as the "fn" with no nested undo */
			do_attribute_container_device_trigger_safe(dev, cont,
								   undo, NULL);
		}
	}

	mutex_unlock(&attribute_container_mutex);
	return ret;

}
341*4882a593Smuzhiyun 
/**
 * attribute_container_device_trigger - execute a trigger for each matching classdev
 *
 * @dev:  The generic device to run the trigger for
 * @fn:	  the function to execute for each classdev.
 *
 * This function is for executing a trigger when you need to know both
 * the container and the classdev.  If you only care about the
 * container, then use attribute_container_trigger() instead.
 * Errors from @fn are ignored; every matching classdev is visited.
 */
void
attribute_container_device_trigger(struct device *dev,
				   int (*fn)(struct attribute_container *,
					     struct device *,
					     struct device *))
{
	struct attribute_container *cont;

	mutex_lock(&attribute_container_mutex);
	list_for_each_entry(cont, &attribute_container_list, node) {
		struct internal_container *ic;
		struct klist_iter iter;

		if (!cont->match(cont, dev))
			continue;

		/* classdev-less containers still get the trigger, with NULL */
		if (attribute_container_no_classdevs(cont)) {
			fn(cont, dev, NULL);
			continue;
		}

		klist_for_each_entry(ic, &cont->containers, node, &iter) {
			if (dev == ic->classdev.parent)
				fn(cont, dev, &ic->classdev);
		}
	}
	mutex_unlock(&attribute_container_mutex);
}
380*4882a593Smuzhiyun 
381*4882a593Smuzhiyun /**
382*4882a593Smuzhiyun  * attribute_container_trigger - trigger a function for each matching container
383*4882a593Smuzhiyun  *
384*4882a593Smuzhiyun  * @dev:  The generic device to activate the trigger for
385*4882a593Smuzhiyun  * @fn:	  the function to trigger
386*4882a593Smuzhiyun  *
387*4882a593Smuzhiyun  * This routine triggers a function that only needs to know the
388*4882a593Smuzhiyun  * matching containers (not the classdev) associated with a device.
389*4882a593Smuzhiyun  * It is more lightweight than attribute_container_device_trigger, so
390*4882a593Smuzhiyun  * should be used in preference unless the triggering function
391*4882a593Smuzhiyun  * actually needs to know the classdev.
392*4882a593Smuzhiyun  */
393*4882a593Smuzhiyun void
attribute_container_trigger(struct device * dev,int (* fn)(struct attribute_container *,struct device *))394*4882a593Smuzhiyun attribute_container_trigger(struct device *dev,
395*4882a593Smuzhiyun 			    int (*fn)(struct attribute_container *,
396*4882a593Smuzhiyun 				      struct device *))
397*4882a593Smuzhiyun {
398*4882a593Smuzhiyun 	struct attribute_container *cont;
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 	mutex_lock(&attribute_container_mutex);
401*4882a593Smuzhiyun 	list_for_each_entry(cont, &attribute_container_list, node) {
402*4882a593Smuzhiyun 		if (cont->match(cont, dev))
403*4882a593Smuzhiyun 			fn(cont, dev);
404*4882a593Smuzhiyun 	}
405*4882a593Smuzhiyun 	mutex_unlock(&attribute_container_mutex);
406*4882a593Smuzhiyun }
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun /**
409*4882a593Smuzhiyun  * attribute_container_add_attrs - add attributes
410*4882a593Smuzhiyun  *
411*4882a593Smuzhiyun  * @classdev: The class device
412*4882a593Smuzhiyun  *
413*4882a593Smuzhiyun  * This simply creates all the class device sysfs files from the
414*4882a593Smuzhiyun  * attributes listed in the container
415*4882a593Smuzhiyun  */
416*4882a593Smuzhiyun int
attribute_container_add_attrs(struct device * classdev)417*4882a593Smuzhiyun attribute_container_add_attrs(struct device *classdev)
418*4882a593Smuzhiyun {
419*4882a593Smuzhiyun 	struct attribute_container *cont =
420*4882a593Smuzhiyun 		attribute_container_classdev_to_container(classdev);
421*4882a593Smuzhiyun 	struct device_attribute **attrs = cont->attrs;
422*4882a593Smuzhiyun 	int i, error;
423*4882a593Smuzhiyun 
424*4882a593Smuzhiyun 	BUG_ON(attrs && cont->grp);
425*4882a593Smuzhiyun 
426*4882a593Smuzhiyun 	if (!attrs && !cont->grp)
427*4882a593Smuzhiyun 		return 0;
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun 	if (cont->grp)
430*4882a593Smuzhiyun 		return sysfs_create_group(&classdev->kobj, cont->grp);
431*4882a593Smuzhiyun 
432*4882a593Smuzhiyun 	for (i = 0; attrs[i]; i++) {
433*4882a593Smuzhiyun 		sysfs_attr_init(&attrs[i]->attr);
434*4882a593Smuzhiyun 		error = device_create_file(classdev, attrs[i]);
435*4882a593Smuzhiyun 		if (error)
436*4882a593Smuzhiyun 			return error;
437*4882a593Smuzhiyun 	}
438*4882a593Smuzhiyun 
439*4882a593Smuzhiyun 	return 0;
440*4882a593Smuzhiyun }
441*4882a593Smuzhiyun 
/**
 * attribute_container_add_class_device - same function as device_add
 *
 * @classdev:	the class device to add
 *
 * This performs essentially the same function as device_add except for
 * attribute containers, namely add the classdev to the system and then
 * create the attribute files
 */
int
attribute_container_add_class_device(struct device *classdev)
{
	int error;

	error = device_add(classdev);
	if (error)
		return error;

	return attribute_container_add_attrs(classdev);
}
460*4882a593Smuzhiyun 
/**
 * attribute_container_add_class_device_adapter - simple adapter for triggers
 *
 * @cont: the container (unused; present for the trigger signature)
 * @dev:  the generic device (unused; present for the trigger signature)
 * @classdev: the class device to add
 *
 * This function is identical to attribute_container_add_class_device except
 * that it is designed to be called from the triggers
 */
int
attribute_container_add_class_device_adapter(struct attribute_container *cont,
					     struct device *dev,
					     struct device *classdev)
{
	return attribute_container_add_class_device(classdev);
}
474*4882a593Smuzhiyun 
475*4882a593Smuzhiyun /**
476*4882a593Smuzhiyun  * attribute_container_remove_attrs - remove any attribute files
477*4882a593Smuzhiyun  *
478*4882a593Smuzhiyun  * @classdev: The class device to remove the files from
479*4882a593Smuzhiyun  *
480*4882a593Smuzhiyun  */
481*4882a593Smuzhiyun void
attribute_container_remove_attrs(struct device * classdev)482*4882a593Smuzhiyun attribute_container_remove_attrs(struct device *classdev)
483*4882a593Smuzhiyun {
484*4882a593Smuzhiyun 	struct attribute_container *cont =
485*4882a593Smuzhiyun 		attribute_container_classdev_to_container(classdev);
486*4882a593Smuzhiyun 	struct device_attribute **attrs = cont->attrs;
487*4882a593Smuzhiyun 	int i;
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun 	if (!attrs && !cont->grp)
490*4882a593Smuzhiyun 		return;
491*4882a593Smuzhiyun 
492*4882a593Smuzhiyun 	if (cont->grp) {
493*4882a593Smuzhiyun 		sysfs_remove_group(&classdev->kobj, cont->grp);
494*4882a593Smuzhiyun 		return ;
495*4882a593Smuzhiyun 	}
496*4882a593Smuzhiyun 
497*4882a593Smuzhiyun 	for (i = 0; attrs[i]; i++)
498*4882a593Smuzhiyun 		device_remove_file(classdev, attrs[i]);
499*4882a593Smuzhiyun }
500*4882a593Smuzhiyun 
/**
 * attribute_container_class_device_del - equivalent of class_device_del
 *
 * @classdev: the class device
 *
 * This function simply removes all the attribute files and then calls
 * device_del.
 */
void
attribute_container_class_device_del(struct device *classdev)
{
	attribute_container_remove_attrs(classdev);
	device_del(classdev);
}
515*4882a593Smuzhiyun 
516*4882a593Smuzhiyun /**
517*4882a593Smuzhiyun  * attribute_container_find_class_device - find the corresponding class_device
518*4882a593Smuzhiyun  *
519*4882a593Smuzhiyun  * @cont:	the container
520*4882a593Smuzhiyun  * @dev:	the generic device
521*4882a593Smuzhiyun  *
522*4882a593Smuzhiyun  * Looks up the device in the container's list of class devices and returns
523*4882a593Smuzhiyun  * the corresponding class_device.
524*4882a593Smuzhiyun  */
525*4882a593Smuzhiyun struct device *
attribute_container_find_class_device(struct attribute_container * cont,struct device * dev)526*4882a593Smuzhiyun attribute_container_find_class_device(struct attribute_container *cont,
527*4882a593Smuzhiyun 				      struct device *dev)
528*4882a593Smuzhiyun {
529*4882a593Smuzhiyun 	struct device *cdev = NULL;
530*4882a593Smuzhiyun 	struct internal_container *ic;
531*4882a593Smuzhiyun 	struct klist_iter iter;
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 	klist_for_each_entry(ic, &cont->containers, node, &iter) {
534*4882a593Smuzhiyun 		if (ic->classdev.parent == dev) {
535*4882a593Smuzhiyun 			cdev = &ic->classdev;
536*4882a593Smuzhiyun 			/* FIXME: must exit iterator then break */
537*4882a593Smuzhiyun 			klist_iter_exit(&iter);
538*4882a593Smuzhiyun 			break;
539*4882a593Smuzhiyun 		}
540*4882a593Smuzhiyun 	}
541*4882a593Smuzhiyun 
542*4882a593Smuzhiyun 	return cdev;
543*4882a593Smuzhiyun }
544*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(attribute_container_find_class_device);
545