1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * drivers/base/core.c - core driver model code (device registration, etc)
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (c) 2002-3 Patrick Mochel
6*4882a593Smuzhiyun * Copyright (c) 2002-3 Open Source Development Labs
7*4882a593Smuzhiyun * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
8*4882a593Smuzhiyun * Copyright (c) 2006 Novell, Inc.
9*4882a593Smuzhiyun */
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <linux/acpi.h>
12*4882a593Smuzhiyun #include <linux/cpufreq.h>
13*4882a593Smuzhiyun #include <linux/device.h>
14*4882a593Smuzhiyun #include <linux/err.h>
15*4882a593Smuzhiyun #include <linux/fwnode.h>
16*4882a593Smuzhiyun #include <linux/init.h>
17*4882a593Smuzhiyun #include <linux/module.h>
18*4882a593Smuzhiyun #include <linux/slab.h>
19*4882a593Smuzhiyun #include <linux/string.h>
20*4882a593Smuzhiyun #include <linux/kdev_t.h>
21*4882a593Smuzhiyun #include <linux/notifier.h>
22*4882a593Smuzhiyun #include <linux/of.h>
23*4882a593Smuzhiyun #include <linux/of_device.h>
24*4882a593Smuzhiyun #include <linux/genhd.h>
25*4882a593Smuzhiyun #include <linux/mutex.h>
26*4882a593Smuzhiyun #include <linux/pm_runtime.h>
27*4882a593Smuzhiyun #include <linux/netdevice.h>
28*4882a593Smuzhiyun #include <linux/sched/signal.h>
29*4882a593Smuzhiyun #include <linux/sched/mm.h>
30*4882a593Smuzhiyun #include <linux/sysfs.h>
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun #include "base.h"
33*4882a593Smuzhiyun #include "power/power.h"
34*4882a593Smuzhiyun
#ifdef CONFIG_SYSFS_DEPRECATED
/*
 * sysfs_deprecated selects the old ("deprecated") sysfs layout at run time;
 * the compile-time default comes from CONFIG_SYSFS_DEPRECATED_V2.
 */
#ifdef CONFIG_SYSFS_DEPRECATED_V2
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
/* Parse the "sysfs.deprecated" boot parameter (a base-10 integer). */
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun /* Device links support. */
49*4882a593Smuzhiyun static LIST_HEAD(deferred_sync);
50*4882a593Smuzhiyun static unsigned int defer_sync_state_count = 1;
51*4882a593Smuzhiyun static DEFINE_MUTEX(fwnode_link_lock);
52*4882a593Smuzhiyun static bool fw_devlink_is_permissive(void);
53*4882a593Smuzhiyun
54*4882a593Smuzhiyun /**
55*4882a593Smuzhiyun * fwnode_link_add - Create a link between two fwnode_handles.
56*4882a593Smuzhiyun * @con: Consumer end of the link.
57*4882a593Smuzhiyun * @sup: Supplier end of the link.
58*4882a593Smuzhiyun *
59*4882a593Smuzhiyun * Create a fwnode link between fwnode handles @con and @sup. The fwnode link
60*4882a593Smuzhiyun * represents the detail that the firmware lists @sup fwnode as supplying a
61*4882a593Smuzhiyun * resource to @con.
62*4882a593Smuzhiyun *
63*4882a593Smuzhiyun * The driver core will use the fwnode link to create a device link between the
64*4882a593Smuzhiyun * two device objects corresponding to @con and @sup when they are created. The
65*4882a593Smuzhiyun * driver core will automatically delete the fwnode link between @con and @sup
66*4882a593Smuzhiyun * after doing that.
67*4882a593Smuzhiyun *
68*4882a593Smuzhiyun * Attempts to create duplicate links between the same pair of fwnode handles
69*4882a593Smuzhiyun * are ignored and there is no reference counting.
70*4882a593Smuzhiyun */
fwnode_link_add(struct fwnode_handle * con,struct fwnode_handle * sup)71*4882a593Smuzhiyun int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
72*4882a593Smuzhiyun {
73*4882a593Smuzhiyun struct fwnode_link *link;
74*4882a593Smuzhiyun int ret = 0;
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun mutex_lock(&fwnode_link_lock);
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun list_for_each_entry(link, &sup->consumers, s_hook)
79*4882a593Smuzhiyun if (link->consumer == con)
80*4882a593Smuzhiyun goto out;
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun link = kzalloc(sizeof(*link), GFP_KERNEL);
83*4882a593Smuzhiyun if (!link) {
84*4882a593Smuzhiyun ret = -ENOMEM;
85*4882a593Smuzhiyun goto out;
86*4882a593Smuzhiyun }
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun link->supplier = sup;
89*4882a593Smuzhiyun INIT_LIST_HEAD(&link->s_hook);
90*4882a593Smuzhiyun link->consumer = con;
91*4882a593Smuzhiyun INIT_LIST_HEAD(&link->c_hook);
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun list_add(&link->s_hook, &sup->consumers);
94*4882a593Smuzhiyun list_add(&link->c_hook, &con->suppliers);
95*4882a593Smuzhiyun out:
96*4882a593Smuzhiyun mutex_unlock(&fwnode_link_lock);
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun return ret;
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun /**
102*4882a593Smuzhiyun * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
103*4882a593Smuzhiyun * @fwnode: fwnode whose supplier links need to be deleted
104*4882a593Smuzhiyun *
105*4882a593Smuzhiyun * Deletes all supplier links connecting directly to @fwnode.
106*4882a593Smuzhiyun */
fwnode_links_purge_suppliers(struct fwnode_handle * fwnode)107*4882a593Smuzhiyun static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun struct fwnode_link *link, *tmp;
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun mutex_lock(&fwnode_link_lock);
112*4882a593Smuzhiyun list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
113*4882a593Smuzhiyun list_del(&link->s_hook);
114*4882a593Smuzhiyun list_del(&link->c_hook);
115*4882a593Smuzhiyun kfree(link);
116*4882a593Smuzhiyun }
117*4882a593Smuzhiyun mutex_unlock(&fwnode_link_lock);
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun /**
121*4882a593Smuzhiyun * fwnode_links_purge_consumers - Delete all consumer links of fwnode_handle.
122*4882a593Smuzhiyun * @fwnode: fwnode whose consumer links need to be deleted
123*4882a593Smuzhiyun *
124*4882a593Smuzhiyun * Deletes all consumer links connecting directly to @fwnode.
125*4882a593Smuzhiyun */
fwnode_links_purge_consumers(struct fwnode_handle * fwnode)126*4882a593Smuzhiyun static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
127*4882a593Smuzhiyun {
128*4882a593Smuzhiyun struct fwnode_link *link, *tmp;
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun mutex_lock(&fwnode_link_lock);
131*4882a593Smuzhiyun list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
132*4882a593Smuzhiyun list_del(&link->s_hook);
133*4882a593Smuzhiyun list_del(&link->c_hook);
134*4882a593Smuzhiyun kfree(link);
135*4882a593Smuzhiyun }
136*4882a593Smuzhiyun mutex_unlock(&fwnode_link_lock);
137*4882a593Smuzhiyun }
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun /**
140*4882a593Smuzhiyun * fwnode_links_purge - Delete all links connected to a fwnode_handle.
141*4882a593Smuzhiyun * @fwnode: fwnode whose links needs to be deleted
142*4882a593Smuzhiyun *
143*4882a593Smuzhiyun * Deletes all links connecting directly to a fwnode.
144*4882a593Smuzhiyun */
void fwnode_links_purge(struct fwnode_handle *fwnode)
{
	/* Drop links in both directions: @fwnode as consumer, then as supplier. */
	fwnode_links_purge_suppliers(fwnode);
	fwnode_links_purge_consumers(fwnode);
}
150*4882a593Smuzhiyun
/*
 * Mark @fwnode as never going to be populated as a struct device and drop all
 * of its consumer links, then do the same for its available descendants.
 * Subtrees that already have a device added are left alone.
 */
static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;

	/* Don't purge consumer links of an added child */
	if (fwnode->dev)
		return;

	fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
	fwnode_links_purge_consumers(fwnode);

	/* Recurse into every available child node. */
	fwnode_for_each_available_child_node(fwnode, child)
		fw_devlink_purge_absent_suppliers(child);
}
165*4882a593Smuzhiyun
#ifdef CONFIG_SRCU
/*
 * Device links: writers are serialized by a mutex, while readers use SRCU so
 * the link lists can be walked in contexts where blocking writers is fine but
 * readers must not sleep waiting for each other.
 */
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

/* Serialize all modifications of the device link lists. */
static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

/* Enter an SRCU read-side critical section; the return value must be passed
 * back to device_links_read_unlock(). */
int device_links_read_lock(void) __acquires(&device_links_srcu)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
	srcu_read_unlock(&device_links_srcu, idx);
}

/* Nonzero if called from within an SRCU read-side critical section. */
int device_links_read_lock_held(void)
{
	return srcu_read_lock_held(&device_links_srcu);
}

/* Wait for all in-flight SRCU readers before a link object is freed. */
static void device_link_synchronize_removal(void)
{
	synchronize_srcu(&device_links_srcu);
}

/* Unhook a link from both the supplier's and the consumer's list, RCU-safely. */
static void device_link_remove_from_lists(struct device_link *link)
{
	list_del_rcu(&link->s_node);
	list_del_rcu(&link->c_node);
}
205*4882a593Smuzhiyun #else /* !CONFIG_SRCU */
/* Without SRCU, fall back to a plain rwsem for both readers and writers. */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

/* Readers share the rwsem; the returned token is unused in this variant. */
int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int device_links_read_lock_held(void)
{
	return lockdep_is_held(&device_links_lock);
}
#endif

/* No grace period is needed when readers hold the rwsem outright. */
static inline void device_link_synchronize_removal(void)
{
}

static void device_link_remove_from_lists(struct device_link *link)
{
	list_del(&link->s_node);
	list_del(&link->c_node);
}
245*4882a593Smuzhiyun #endif /* !CONFIG_SRCU */
246*4882a593Smuzhiyun
device_is_ancestor(struct device * dev,struct device * target)247*4882a593Smuzhiyun static bool device_is_ancestor(struct device *dev, struct device *target)
248*4882a593Smuzhiyun {
249*4882a593Smuzhiyun while (target->parent) {
250*4882a593Smuzhiyun target = target->parent;
251*4882a593Smuzhiyun if (dev == target)
252*4882a593Smuzhiyun return true;
253*4882a593Smuzhiyun }
254*4882a593Smuzhiyun return false;
255*4882a593Smuzhiyun }
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun /**
258*4882a593Smuzhiyun * device_is_dependent - Check if one device depends on another one
259*4882a593Smuzhiyun * @dev: Device to check dependencies for.
260*4882a593Smuzhiyun * @target: Device to check against.
261*4882a593Smuzhiyun *
262*4882a593Smuzhiyun * Check if @target depends on @dev or any device dependent on it (its child or
263*4882a593Smuzhiyun * its consumer etc). Return 1 if that is the case or 0 otherwise.
264*4882a593Smuzhiyun */
int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	/*
	 * The "ancestors" check is needed to catch the case when the target
	 * device has not been completely initialized yet and it is still
	 * missing from the list of children of its parent device.
	 */
	if (dev == target || device_is_ancestor(dev, target))
		return 1;

	/* Recurse into the children of @dev; a match anywhere below counts. */
	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	/* Then follow the consumer links hanging off @dev. */
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* Managed SYNC_STATE_ONLY links are skipped here. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (link->consumer == target)
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}
296*4882a593Smuzhiyun
/*
 * Derive the initial state of a new link from the driver states of its
 * supplier and consumer endpoints.
 */
static void device_link_init_status(struct device_link *link,
				    struct device *consumer,
				    struct device *supplier)
{
	switch (supplier->links.status) {
	case DL_DEV_PROBING:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			/*
			 * A consumer driver can create a link to a supplier
			 * that has not completed its probing yet as long as it
			 * knows that the supplier is already functional (for
			 * example, it has just acquired some resources from the
			 * supplier).
			 */
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
		break;
	case DL_DEV_DRIVER_BOUND:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		case DL_DEV_DRIVER_BOUND:
			/* Both ends have bound drivers: the link is active. */
			link->status = DL_STATE_ACTIVE;
			break;
		default:
			/* Supplier is ready, consumer is not bound yet. */
			link->status = DL_STATE_AVAILABLE;
			break;
		}
		break;
	case DL_DEV_UNBINDING:
		link->status = DL_STATE_SUPPLIER_UNBIND;
		break;
	default:
		link->status = DL_STATE_DORMANT;
		break;
	}
}
340*4882a593Smuzhiyun
/*
 * Move @dev to the ends of the devices_kset and dpm_list, then do the same
 * recursively for its children and its consumers so they stay ordered after
 * it. Callers hold the requisite locks (see device_pm_move_to_tail()).
 */
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* Managed SYNC_STATE_ONLY links are skipped here. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;
		device_reorder_to_tail(link->consumer, NULL);
	}

	return 0;
}
365*4882a593Smuzhiyun
366*4882a593Smuzhiyun /**
367*4882a593Smuzhiyun * device_pm_move_to_tail - Move set of devices to the end of device lists
368*4882a593Smuzhiyun * @dev: Device to move
369*4882a593Smuzhiyun *
370*4882a593Smuzhiyun * This is a device_reorder_to_tail() wrapper taking the requisite locks.
371*4882a593Smuzhiyun *
372*4882a593Smuzhiyun * It moves the @dev along with all of its children and all of its consumers
373*4882a593Smuzhiyun * to the ends of the device_kset and dpm_list, recursively.
374*4882a593Smuzhiyun */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	/* Lock order: device links read lock outside, PM lock inside. */
	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun #define to_devlink(dev) container_of((dev), struct device_link, link_dev)
387*4882a593Smuzhiyun
status_show(struct device * dev,struct device_attribute * attr,char * buf)388*4882a593Smuzhiyun static ssize_t status_show(struct device *dev,
389*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
390*4882a593Smuzhiyun {
391*4882a593Smuzhiyun const char *output;
392*4882a593Smuzhiyun
393*4882a593Smuzhiyun switch (to_devlink(dev)->status) {
394*4882a593Smuzhiyun case DL_STATE_NONE:
395*4882a593Smuzhiyun output = "not tracked";
396*4882a593Smuzhiyun break;
397*4882a593Smuzhiyun case DL_STATE_DORMANT:
398*4882a593Smuzhiyun output = "dormant";
399*4882a593Smuzhiyun break;
400*4882a593Smuzhiyun case DL_STATE_AVAILABLE:
401*4882a593Smuzhiyun output = "available";
402*4882a593Smuzhiyun break;
403*4882a593Smuzhiyun case DL_STATE_CONSUMER_PROBE:
404*4882a593Smuzhiyun output = "consumer probing";
405*4882a593Smuzhiyun break;
406*4882a593Smuzhiyun case DL_STATE_ACTIVE:
407*4882a593Smuzhiyun output = "active";
408*4882a593Smuzhiyun break;
409*4882a593Smuzhiyun case DL_STATE_SUPPLIER_UNBIND:
410*4882a593Smuzhiyun output = "supplier unbinding";
411*4882a593Smuzhiyun break;
412*4882a593Smuzhiyun default:
413*4882a593Smuzhiyun output = "unknown";
414*4882a593Smuzhiyun break;
415*4882a593Smuzhiyun }
416*4882a593Smuzhiyun
417*4882a593Smuzhiyun return sysfs_emit(buf, "%s\n", output);
418*4882a593Smuzhiyun }
419*4882a593Smuzhiyun static DEVICE_ATTR_RO(status);
420*4882a593Smuzhiyun
auto_remove_on_show(struct device * dev,struct device_attribute * attr,char * buf)421*4882a593Smuzhiyun static ssize_t auto_remove_on_show(struct device *dev,
422*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
423*4882a593Smuzhiyun {
424*4882a593Smuzhiyun struct device_link *link = to_devlink(dev);
425*4882a593Smuzhiyun const char *output;
426*4882a593Smuzhiyun
427*4882a593Smuzhiyun if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
428*4882a593Smuzhiyun output = "supplier unbind";
429*4882a593Smuzhiyun else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
430*4882a593Smuzhiyun output = "consumer unbind";
431*4882a593Smuzhiyun else
432*4882a593Smuzhiyun output = "never";
433*4882a593Smuzhiyun
434*4882a593Smuzhiyun return sysfs_emit(buf, "%s\n", output);
435*4882a593Smuzhiyun }
436*4882a593Smuzhiyun static DEVICE_ATTR_RO(auto_remove_on);
437*4882a593Smuzhiyun
runtime_pm_show(struct device * dev,struct device_attribute * attr,char * buf)438*4882a593Smuzhiyun static ssize_t runtime_pm_show(struct device *dev,
439*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
440*4882a593Smuzhiyun {
441*4882a593Smuzhiyun struct device_link *link = to_devlink(dev);
442*4882a593Smuzhiyun
443*4882a593Smuzhiyun return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
444*4882a593Smuzhiyun }
445*4882a593Smuzhiyun static DEVICE_ATTR_RO(runtime_pm);
446*4882a593Smuzhiyun
sync_state_only_show(struct device * dev,struct device_attribute * attr,char * buf)447*4882a593Smuzhiyun static ssize_t sync_state_only_show(struct device *dev,
448*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
449*4882a593Smuzhiyun {
450*4882a593Smuzhiyun struct device_link *link = to_devlink(dev);
451*4882a593Smuzhiyun
452*4882a593Smuzhiyun return sysfs_emit(buf, "%d\n",
453*4882a593Smuzhiyun !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
454*4882a593Smuzhiyun }
455*4882a593Smuzhiyun static DEVICE_ATTR_RO(sync_state_only);
456*4882a593Smuzhiyun
/* sysfs attributes exposed by every device-link class device. */
static struct attribute *devlink_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_auto_remove_on.attr,
	&dev_attr_runtime_pm.attr,
	&dev_attr_sync_state_only.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devlink);
465*4882a593Smuzhiyun
/* Deferred final teardown of a device link (runs from system_long_wq). */
static void device_link_release_fn(struct work_struct *work)
{
	struct device_link *link = container_of(work, struct device_link, rm_work);

	/* Ensure that all references to the link object have been dropped. */
	device_link_synchronize_removal();

	/* Let go of the supplier from a runtime PM perspective. */
	pm_runtime_release_supplier(link);
	pm_request_idle(link->supplier);

	/* Drop the link's references to both endpoint devices. */
	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}
480*4882a593Smuzhiyun
/* Class release callback: defer the actual cleanup to a workqueue. */
static void devlink_dev_release(struct device *dev)
{
	struct device_link *link = to_devlink(dev);

	INIT_WORK(&link->rm_work, device_link_release_fn);
	/*
	 * It may take a while to complete this work because of the SRCU
	 * synchronization in device_link_release_fn() and if the consumer or
	 * supplier devices get deleted when it runs, so put it into the "long"
	 * workqueue.
	 */
	queue_work(system_long_wq, &link->rm_work);
}
494*4882a593Smuzhiyun
/* Device class under which each device link is registered as a device. */
static struct class devlink_class = {
	.name = "devlink",
	.owner = THIS_MODULE,
	.dev_groups = devlink_groups,
	.dev_release = devlink_dev_release,
};
501*4882a593Smuzhiyun
/*
 * Create the four sysfs symlinks describing a device link: "supplier" and
 * "consumer" inside the link device, plus back-links named
 * "consumer:<bus>:<name>" / "supplier:<bus>:<name>" in the two endpoints.
 * Returns 0 on success or a negative errno; partially created links are
 * removed again on failure.
 */
static int devlink_add_symlinks(struct device *dev,
				struct class_interface *class_intf)
{
	int ret;
	size_t len;
	struct device_link *link = to_devlink(dev);
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	/*
	 * Size the scratch buffer for the longer of the two
	 * "<prefix>:<bus>:<name>" names built below (incl. the trailing NUL).
	 */
	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
	if (ret)
		goto out;

	ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
	if (ret)
		goto err_con;

	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_con_dev;

	snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
	ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_sup_dev;

	goto out;

	/* Error unwinding, in reverse order of creation. */
err_sup_dev:
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
	sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
	sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
	kfree(buf);
	return ret;
}
551*4882a593Smuzhiyun
/* Remove the sysfs symlinks created by devlink_add_symlinks(). */
static void devlink_remove_symlinks(struct device *dev,
				    struct class_interface *class_intf)
{
	struct device_link *link = to_devlink(dev);
	size_t len;
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	sysfs_remove_link(&link->link_dev.kobj, "consumer");
	sysfs_remove_link(&link->link_dev.kobj, "supplier");

	/* Same buffer sizing as in devlink_add_symlinks(). */
	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		WARN(1, "Unable to properly free device link symlinks!\n");
		return;
	}

	/* Only touch the consumer's directory if it is still registered. */
	if (device_is_registered(con)) {
		snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
		sysfs_remove_link(&con->kobj, buf);
	}
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
	kfree(buf);
}
582*4882a593Smuzhiyun
/* Creates/removes the endpoint symlinks as devlink devices come and go. */
static struct class_interface devlink_class_intf = {
	.class = &devlink_class,
	.add_dev = devlink_add_symlinks,
	.remove_dev = devlink_remove_symlinks,
};
588*4882a593Smuzhiyun
devlink_class_init(void)589*4882a593Smuzhiyun static int __init devlink_class_init(void)
590*4882a593Smuzhiyun {
591*4882a593Smuzhiyun int ret;
592*4882a593Smuzhiyun
593*4882a593Smuzhiyun ret = class_register(&devlink_class);
594*4882a593Smuzhiyun if (ret)
595*4882a593Smuzhiyun return ret;
596*4882a593Smuzhiyun
597*4882a593Smuzhiyun ret = class_interface_register(&devlink_class_intf);
598*4882a593Smuzhiyun if (ret)
599*4882a593Smuzhiyun class_unregister(&devlink_class);
600*4882a593Smuzhiyun
601*4882a593Smuzhiyun return ret;
602*4882a593Smuzhiyun }
603*4882a593Smuzhiyun postcore_initcall(devlink_class_init);
604*4882a593Smuzhiyun
/* Flags that may appear on a link managed by the driver core. */
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
			       DL_FLAG_AUTOPROBE_CONSUMER | \
			       DL_FLAG_SYNC_STATE_ONLY | \
			       DL_FLAG_INFERRED)

/* The full set of flags callers may pass to device_link_add(). */
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
613*4882a593Smuzhiyun
614*4882a593Smuzhiyun /**
615*4882a593Smuzhiyun * device_link_add - Create a link between two devices.
616*4882a593Smuzhiyun * @consumer: Consumer end of the link.
617*4882a593Smuzhiyun * @supplier: Supplier end of the link.
618*4882a593Smuzhiyun * @flags: Link flags.
619*4882a593Smuzhiyun *
620*4882a593Smuzhiyun * The caller is responsible for the proper synchronization of the link creation
621*4882a593Smuzhiyun * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
622*4882a593Smuzhiyun * runtime PM framework to take the link into account. Second, if the
623*4882a593Smuzhiyun * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
624*4882a593Smuzhiyun * be forced into the active metastate and reference-counted upon the creation
625*4882a593Smuzhiyun * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
626*4882a593Smuzhiyun * ignored.
627*4882a593Smuzhiyun *
628*4882a593Smuzhiyun * If DL_FLAG_STATELESS is set in @flags, the caller of this function is
629*4882a593Smuzhiyun * expected to release the link returned by it directly with the help of either
630*4882a593Smuzhiyun * device_link_del() or device_link_remove().
631*4882a593Smuzhiyun *
632*4882a593Smuzhiyun * If that flag is not set, however, the caller of this function is handing the
633*4882a593Smuzhiyun * management of the link over to the driver core entirely and its return value
634*4882a593Smuzhiyun * can only be used to check whether or not the link is present. In that case,
635*4882a593Smuzhiyun * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
636*4882a593Smuzhiyun * flags can be used to indicate to the driver core when the link can be safely
637*4882a593Smuzhiyun * deleted. Namely, setting one of them in @flags indicates to the driver core
638*4882a593Smuzhiyun * that the link is not going to be used (by the given caller of this function)
639*4882a593Smuzhiyun * after unbinding the consumer or supplier driver, respectively, from its
640*4882a593Smuzhiyun * device, so the link can be deleted at that point. If none of them is set,
641*4882a593Smuzhiyun * the link will be maintained until one of the devices pointed to by it (either
642*4882a593Smuzhiyun * the consumer or the supplier) is unregistered.
643*4882a593Smuzhiyun *
644*4882a593Smuzhiyun * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
645*4882a593Smuzhiyun * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
646*4882a593Smuzhiyun * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
647*4882a593Smuzhiyun * be used to request the driver core to automatically probe for a consumer
648*4882a593Smuzhiyun * driver after successfully binding a driver to the supplier device.
649*4882a593Smuzhiyun *
650*4882a593Smuzhiyun * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
651*4882a593Smuzhiyun * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
652*4882a593Smuzhiyun * the same time is invalid and will cause NULL to be returned upfront.
653*4882a593Smuzhiyun * However, if a device link between the given @consumer and @supplier pair
654*4882a593Smuzhiyun * exists already when this function is called for them, the existing link will
655*4882a593Smuzhiyun * be returned regardless of its current type and status (the link's flags may
656*4882a593Smuzhiyun * be modified then). The caller of this function is then expected to treat
657*4882a593Smuzhiyun * the link as though it has just been created, so (in particular) if
658*4882a593Smuzhiyun * DL_FLAG_STATELESS was passed in @flags, the link needs to be released
659*4882a593Smuzhiyun * explicitly when not needed any more (as stated above).
660*4882a593Smuzhiyun *
661*4882a593Smuzhiyun * A side effect of the link creation is re-ordering of dpm_list and the
662*4882a593Smuzhiyun * devices_kset list by moving the consumer device and all devices depending
663*4882a593Smuzhiyun * on it to the ends of these lists (that does not happen to devices that have
664*4882a593Smuzhiyun * not been registered when this function is called).
665*4882a593Smuzhiyun *
666*4882a593Smuzhiyun * The supplier device is required to be registered when this function is called
667*4882a593Smuzhiyun * and NULL will be returned if that is not the case. The consumer device need
668*4882a593Smuzhiyun * not be registered, however.
669*4882a593Smuzhiyun */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	/*
	 * Reject invalid requests up front: missing endpoints, self-links,
	 * unknown flags, DL_FLAG_STATELESS combined with any managed-link
	 * flag, DL_FLAG_SYNC_STATE_ONLY combined with anything other than
	 * DL_FLAG_INFERRED, and DL_FLAG_AUTOPROBE_CONSUMER combined with
	 * either autoremove flag.
	 */
	if (!consumer || !supplier || consumer == supplier ||
	    flags & ~DL_ADD_VALID_FLAGS ||
	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
	     (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	/*
	 * DL_FLAG_RPM_ACTIVE requires the supplier to be resumed before the
	 * link is created.  The usage count taken here is dropped at the end
	 * of the function if no link ends up being returned.
	 */
	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	/* Any link that is not stateless is managed by the driver core. */
	if (!(flags & DL_FLAG_STATELESS))
		flags |= DL_FLAG_MANAGED;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
	 * the supplier already in the graph, return NULL. If the link is a
	 * SYNC_STATE_ONLY link, we don't check for reverse dependencies
	 * because it only affects sync_state() callbacks.
	 */
	if (!device_pm_initialized(supplier)
	    || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
		  device_is_dependent(consumer, supplier))) {
		link = NULL;
		goto out;
	}

	/*
	 * SYNC_STATE_ONLY links are useless once a consumer device has probed.
	 * So, only create it if the consumer hasn't probed yet.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY &&
	    consumer->links.status != DL_DEV_NO_DRIVER &&
	    consumer->links.status != DL_DEV_PROBING) {
		link = NULL;
		goto out;
	}

	/*
	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
	 * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
	 */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	/*
	 * If a link between this consumer/supplier pair exists already,
	 * update it in place instead of creating a duplicate.
	 */
	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		/* An explicit request upgrades an inferred (fw_devlink) link. */
		if (link->flags & DL_FLAG_INFERRED &&
		    !(flags & DL_FLAG_INFERRED))
			link->flags &= ~DL_FLAG_INFERRED;

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			kref_get(&link->kref);
			if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
			    !(link->flags & DL_FLAG_STATELESS)) {
				link->flags |= DL_FLAG_STATELESS;
				goto reorder;
			} else {
				link->flags |= DL_FLAG_STATELESS;
				goto out;
			}
		}

		/*
		 * If the life time of the link following from the new flags is
		 * longer than indicated by the flags of the existing link,
		 * update the existing link to stay around longer.
		 */
		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
			}
		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
					 DL_FLAG_AUTOREMOVE_SUPPLIER);
		}
		/* Turning a stateless link into a managed one takes a kref. */
		if (!(link->flags & DL_FLAG_MANAGED)) {
			kref_get(&link->kref);
			link->flags |= DL_FLAG_MANAGED;
			device_link_init_status(link, consumer, supplier);
		}
		/*
		 * Dropping SYNC_STATE_ONLY makes this a real dependency, so
		 * the device lists must be reordered accordingly.
		 */
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
			goto reorder;
		}

		goto out;
	}

	/* No existing link - allocate and register a new one. */
	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	refcount_set(&link->rpm_active, 1);

	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	/* The link itself is exposed as a device in the devlink class. */
	link->link_dev.class = &devlink_class;
	device_set_pm_not_required(&link->link_dev);
	dev_set_name(&link->link_dev, "%s:%s--%s:%s",
		     dev_bus_name(supplier), dev_name(supplier),
		     dev_bus_name(consumer), dev_name(consumer));
	if (device_register(&link->link_dev)) {
		put_device(&link->link_dev);
		link = NULL;
		goto out;
	}

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE)
			refcount_inc(&link->rpm_active);

		pm_runtime_new_link(consumer);
	}

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS)
		link->status = DL_STATE_NONE;
	else
		device_link_init_status(link, consumer, supplier);

	/*
	 * Some callers expect the link creation during consumer driver probe to
	 * resume the supplier even without DL_FLAG_RPM_ACTIVE.
	 */
	if (link->status == DL_STATE_CONSUMER_PROBE &&
	    flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_resume(supplier);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	if (flags & DL_FLAG_SYNC_STATE_ONLY) {
		dev_dbg(consumer,
			"Linked as a sync state only consumer to %s\n",
			dev_name(supplier));
		goto out;
	}

reorder:
	/*
	 * Move the consumer and all of the devices depending on it to the end
	 * of dpm_list and the devices_kset list.
	 *
	 * It is necessary to hold dpm_list locked throughout all that or else
	 * we may end up suspending with a wrong ordering of it.
	 */
	device_reorder_to_tail(consumer, NULL);

	dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

out:
	device_pm_unlock();
	device_links_write_unlock();

	/* Balance the pm_runtime_get_sync() taken above if no link resulted. */
	if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
		pm_runtime_put(supplier);

	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
867*4882a593Smuzhiyun
/*
 * kref release callback: tear a device link down.  The visible callers
 * (device_link_put_kref() and device_link_drop_managed()) run with the
 * device links write lock held.
 */
static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_dbg(link->consumer, "Dropping the link to %s\n",
		dev_name(link->supplier));

	/* Tell runtime PM to forget this consumer<->supplier dependency. */
	pm_runtime_drop_link(link);

	/* Unlink from both endpoints, then drop the devlink class device. */
	device_link_remove_from_lists(link);
	device_unregister(&link->link_dev);
}
880*4882a593Smuzhiyun
device_link_put_kref(struct device_link * link)881*4882a593Smuzhiyun static void device_link_put_kref(struct device_link *link)
882*4882a593Smuzhiyun {
883*4882a593Smuzhiyun if (link->flags & DL_FLAG_STATELESS)
884*4882a593Smuzhiyun kref_put(&link->kref, __device_link_del);
885*4882a593Smuzhiyun else
886*4882a593Smuzhiyun WARN(1, "Unable to drop a managed device link reference\n");
887*4882a593Smuzhiyun }
888*4882a593Smuzhiyun
889*4882a593Smuzhiyun /**
890*4882a593Smuzhiyun * device_link_del - Delete a stateless link between two devices.
891*4882a593Smuzhiyun * @link: Device link to delete.
892*4882a593Smuzhiyun *
893*4882a593Smuzhiyun * The caller must ensure proper synchronization of this function with runtime
894*4882a593Smuzhiyun * PM. If the link was added multiple times, it needs to be deleted as often.
895*4882a593Smuzhiyun * Care is required for hotplugged devices: Their links are purged on removal
896*4882a593Smuzhiyun * and calling device_link_del() is then no longer allowed.
897*4882a593Smuzhiyun */
void device_link_del(struct device_link *link)
{
	/* The write lock keeps the kref release (link teardown) serialized. */
	device_links_write_lock();
	device_link_put_kref(link);
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
905*4882a593Smuzhiyun
906*4882a593Smuzhiyun /**
907*4882a593Smuzhiyun * device_link_remove - Delete a stateless link between two devices.
908*4882a593Smuzhiyun * @consumer: Consumer end of the link.
909*4882a593Smuzhiyun * @supplier: Supplier end of the link.
910*4882a593Smuzhiyun *
911*4882a593Smuzhiyun * The caller must ensure proper synchronization of this function with runtime
912*4882a593Smuzhiyun * PM.
913*4882a593Smuzhiyun */
device_link_remove(void * consumer,struct device * supplier)914*4882a593Smuzhiyun void device_link_remove(void *consumer, struct device *supplier)
915*4882a593Smuzhiyun {
916*4882a593Smuzhiyun struct device_link *link;
917*4882a593Smuzhiyun
918*4882a593Smuzhiyun if (WARN_ON(consumer == supplier))
919*4882a593Smuzhiyun return;
920*4882a593Smuzhiyun
921*4882a593Smuzhiyun device_links_write_lock();
922*4882a593Smuzhiyun
923*4882a593Smuzhiyun list_for_each_entry(link, &supplier->links.consumers, s_node) {
924*4882a593Smuzhiyun if (link->consumer == consumer) {
925*4882a593Smuzhiyun device_link_put_kref(link);
926*4882a593Smuzhiyun break;
927*4882a593Smuzhiyun }
928*4882a593Smuzhiyun }
929*4882a593Smuzhiyun
930*4882a593Smuzhiyun device_links_write_unlock();
931*4882a593Smuzhiyun }
932*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_link_remove);
933*4882a593Smuzhiyun
device_links_missing_supplier(struct device * dev)934*4882a593Smuzhiyun static void device_links_missing_supplier(struct device *dev)
935*4882a593Smuzhiyun {
936*4882a593Smuzhiyun struct device_link *link;
937*4882a593Smuzhiyun
938*4882a593Smuzhiyun list_for_each_entry(link, &dev->links.suppliers, c_node) {
939*4882a593Smuzhiyun if (link->status != DL_STATE_CONSUMER_PROBE)
940*4882a593Smuzhiyun continue;
941*4882a593Smuzhiyun
942*4882a593Smuzhiyun if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
943*4882a593Smuzhiyun WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
944*4882a593Smuzhiyun } else {
945*4882a593Smuzhiyun WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
946*4882a593Smuzhiyun WRITE_ONCE(link->status, DL_STATE_DORMANT);
947*4882a593Smuzhiyun }
948*4882a593Smuzhiyun }
949*4882a593Smuzhiyun }
950*4882a593Smuzhiyun
951*4882a593Smuzhiyun /**
952*4882a593Smuzhiyun * device_links_check_suppliers - Check presence of supplier drivers.
953*4882a593Smuzhiyun * @dev: Consumer device.
954*4882a593Smuzhiyun *
955*4882a593Smuzhiyun * Check links from this device to any suppliers. Walk the list of the device's
956*4882a593Smuzhiyun * links to suppliers and see if all of them are available. If not, simply
957*4882a593Smuzhiyun * return -EPROBE_DEFER.
958*4882a593Smuzhiyun *
959*4882a593Smuzhiyun * We need to guarantee that the supplier will not go away after the check has
960*4882a593Smuzhiyun * been positive here. It only can go away in __device_release_driver() and
961*4882a593Smuzhiyun * that function checks the device's links to consumers. This means we need to
962*4882a593Smuzhiyun * mark the link as "consumer probe in progress" to make the supplier removal
963*4882a593Smuzhiyun * wait for us to complete (or bad things may happen).
964*4882a593Smuzhiyun *
965*4882a593Smuzhiyun * Links without the DL_FLAG_MANAGED flag set are ignored.
966*4882a593Smuzhiyun */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0;

	/*
	 * Device waiting for supplier to become available is not allowed to
	 * probe.
	 */
	mutex_lock(&fwnode_link_lock);
	if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
	    !fw_devlink_is_permissive()) {
		dev_dbg(dev, "probe deferral - wait for supplier %pfwP\n",
			list_first_entry(&dev->fwnode->suppliers,
					 struct fwnode_link,
					 c_hook)->supplier);
		mutex_unlock(&fwnode_link_lock);
		return -EPROBE_DEFER;
	}
	mutex_unlock(&fwnode_link_lock);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * SYNC_STATE_ONLY links do not gate probing; any other link
		 * must be "available" (supplier bound) for probing to go on.
		 */
		if (link->status != DL_STATE_AVAILABLE &&
		    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
			device_links_missing_supplier(dev);
			dev_dbg(dev, "probe deferral - supplier %s not ready\n",
				dev_name(link->supplier));
			ret = -EPROBE_DEFER;
			break;
		}
		/*
		 * Mark the link "consumer probe in progress" so the supplier
		 * removal path waits for this probe to complete.
		 */
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
	return ret;
}
1009*4882a593Smuzhiyun
1010*4882a593Smuzhiyun /**
1011*4882a593Smuzhiyun * __device_links_queue_sync_state - Queue a device for sync_state() callback
1012*4882a593Smuzhiyun * @dev: Device to call sync_state() on
1013*4882a593Smuzhiyun * @list: List head to queue the @dev on
1014*4882a593Smuzhiyun *
1015*4882a593Smuzhiyun * Queues a device for a sync_state() callback when the device links write lock
1016*4882a593Smuzhiyun * isn't held. This allows the sync_state() execution flow to use device links
1017*4882a593Smuzhiyun * APIs. The caller must ensure this function is called with
1018*4882a593Smuzhiyun * device_links_write_lock() held.
1019*4882a593Smuzhiyun *
1020*4882a593Smuzhiyun * This function does a get_device() to make sure the device is not freed while
1021*4882a593Smuzhiyun * on this list.
1022*4882a593Smuzhiyun *
1023*4882a593Smuzhiyun * So the caller must also ensure that device_links_flush_sync_list() is called
1024*4882a593Smuzhiyun * as soon as the caller releases device_links_write_lock(). This is necessary
1025*4882a593Smuzhiyun * to make sure the sync_state() is called in a timely fashion and the
1026*4882a593Smuzhiyun * put_device() is called on this device.
1027*4882a593Smuzhiyun */
__device_links_queue_sync_state(struct device * dev,struct list_head * list)1028*4882a593Smuzhiyun static void __device_links_queue_sync_state(struct device *dev,
1029*4882a593Smuzhiyun struct list_head *list)
1030*4882a593Smuzhiyun {
1031*4882a593Smuzhiyun struct device_link *link;
1032*4882a593Smuzhiyun
1033*4882a593Smuzhiyun if (!dev_has_sync_state(dev))
1034*4882a593Smuzhiyun return;
1035*4882a593Smuzhiyun if (dev->state_synced)
1036*4882a593Smuzhiyun return;
1037*4882a593Smuzhiyun
1038*4882a593Smuzhiyun list_for_each_entry(link, &dev->links.consumers, s_node) {
1039*4882a593Smuzhiyun if (!(link->flags & DL_FLAG_MANAGED))
1040*4882a593Smuzhiyun continue;
1041*4882a593Smuzhiyun if (link->status != DL_STATE_ACTIVE)
1042*4882a593Smuzhiyun return;
1043*4882a593Smuzhiyun }
1044*4882a593Smuzhiyun
1045*4882a593Smuzhiyun /*
1046*4882a593Smuzhiyun * Set the flag here to avoid adding the same device to a list more
1047*4882a593Smuzhiyun * than once. This can happen if new consumers get added to the device
1048*4882a593Smuzhiyun * and probed before the list is flushed.
1049*4882a593Smuzhiyun */
1050*4882a593Smuzhiyun dev->state_synced = true;
1051*4882a593Smuzhiyun
1052*4882a593Smuzhiyun if (WARN_ON(!list_empty(&dev->links.defer_sync)))
1053*4882a593Smuzhiyun return;
1054*4882a593Smuzhiyun
1055*4882a593Smuzhiyun get_device(dev);
1056*4882a593Smuzhiyun list_add_tail(&dev->links.defer_sync, list);
1057*4882a593Smuzhiyun }
1058*4882a593Smuzhiyun
1059*4882a593Smuzhiyun /**
1060*4882a593Smuzhiyun * device_links_flush_sync_list - Call sync_state() on a list of devices
1061*4882a593Smuzhiyun * @list: List of devices to call sync_state() on
1062*4882a593Smuzhiyun * @dont_lock_dev: Device for which lock is already held by the caller
1063*4882a593Smuzhiyun *
1064*4882a593Smuzhiyun * Calls sync_state() on all the devices that have been queued for it. This
1065*4882a593Smuzhiyun * function is used in conjunction with __device_links_queue_sync_state(). The
1066*4882a593Smuzhiyun * @dont_lock_dev parameter is useful when this function is called from a
1067*4882a593Smuzhiyun * context where a device lock is already held.
1068*4882a593Smuzhiyun */
device_links_flush_sync_list(struct list_head * list,struct device * dont_lock_dev)1069*4882a593Smuzhiyun static void device_links_flush_sync_list(struct list_head *list,
1070*4882a593Smuzhiyun struct device *dont_lock_dev)
1071*4882a593Smuzhiyun {
1072*4882a593Smuzhiyun struct device *dev, *tmp;
1073*4882a593Smuzhiyun
1074*4882a593Smuzhiyun list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
1075*4882a593Smuzhiyun list_del_init(&dev->links.defer_sync);
1076*4882a593Smuzhiyun
1077*4882a593Smuzhiyun if (dev != dont_lock_dev)
1078*4882a593Smuzhiyun device_lock(dev);
1079*4882a593Smuzhiyun
1080*4882a593Smuzhiyun if (dev->bus->sync_state)
1081*4882a593Smuzhiyun dev->bus->sync_state(dev);
1082*4882a593Smuzhiyun else if (dev->driver && dev->driver->sync_state)
1083*4882a593Smuzhiyun dev->driver->sync_state(dev);
1084*4882a593Smuzhiyun
1085*4882a593Smuzhiyun if (dev != dont_lock_dev)
1086*4882a593Smuzhiyun device_unlock(dev);
1087*4882a593Smuzhiyun
1088*4882a593Smuzhiyun put_device(dev);
1089*4882a593Smuzhiyun }
1090*4882a593Smuzhiyun }
1091*4882a593Smuzhiyun
/*
 * Defer sync_state() callbacks by bumping the pause count under the device
 * links write lock.  Must be balanced by a later call to
 * device_links_supplier_sync_state_resume().
 */
void device_links_supplier_sync_state_pause(void)
{
	device_links_write_lock();
	defer_sync_state_count++;
	device_links_write_unlock();
}
1098*4882a593Smuzhiyun
/*
 * Drop one sync_state() pause reference.  When the count reaches zero,
 * everything accumulated on the deferred_sync list is queued and the
 * corresponding sync_state() callbacks are invoked.
 */
void device_links_supplier_sync_state_resume(void)
{
	struct device *dev, *tmp;
	LIST_HEAD(sync_list);

	device_links_write_lock();
	if (!defer_sync_state_count) {
		WARN(true, "Unmatched sync_state pause/resume!");
		goto out;
	}
	defer_sync_state_count--;
	/* Still paused by another caller - keep deferring. */
	if (defer_sync_state_count)
		goto out;

	list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
		/*
		 * Delete from deferred_sync list before queuing it to
		 * sync_list because defer_sync is used for both lists.
		 */
		list_del_init(&dev->links.defer_sync);
		__device_links_queue_sync_state(dev, &sync_list);
	}
out:
	device_links_write_unlock();

	/* Must run after releasing the write lock - the callbacks may link. */
	device_links_flush_sync_list(&sync_list, NULL);
}
1126*4882a593Smuzhiyun
/* Release any outstanding sync_state() pause at late_initcall time. */
static int sync_state_resume_initcall(void)
{
	device_links_supplier_sync_state_resume();
	return 0;
}
late_initcall(sync_state_resume_initcall);
1133*4882a593Smuzhiyun
__device_links_supplier_defer_sync(struct device * sup)1134*4882a593Smuzhiyun static void __device_links_supplier_defer_sync(struct device *sup)
1135*4882a593Smuzhiyun {
1136*4882a593Smuzhiyun if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
1137*4882a593Smuzhiyun list_add_tail(&sup->links.defer_sync, &deferred_sync);
1138*4882a593Smuzhiyun }
1139*4882a593Smuzhiyun
/*
 * Turn a managed link into an unmanaged one and drop the reference held on
 * behalf of the DL_FLAG_MANAGED state - this may free the link via
 * __device_link_del().
 */
static void device_link_drop_managed(struct device_link *link)
{
	link->flags &= ~DL_FLAG_MANAGED;
	WRITE_ONCE(link->status, DL_STATE_NONE);
	kref_put(&link->kref, __device_link_del);
}
1146*4882a593Smuzhiyun
waiting_for_supplier_show(struct device * dev,struct device_attribute * attr,char * buf)1147*4882a593Smuzhiyun static ssize_t waiting_for_supplier_show(struct device *dev,
1148*4882a593Smuzhiyun struct device_attribute *attr,
1149*4882a593Smuzhiyun char *buf)
1150*4882a593Smuzhiyun {
1151*4882a593Smuzhiyun bool val;
1152*4882a593Smuzhiyun
1153*4882a593Smuzhiyun device_lock(dev);
1154*4882a593Smuzhiyun val = !list_empty(&dev->fwnode->suppliers);
1155*4882a593Smuzhiyun device_unlock(dev);
1156*4882a593Smuzhiyun return sysfs_emit(buf, "%u\n", val);
1157*4882a593Smuzhiyun }
1158*4882a593Smuzhiyun static DEVICE_ATTR_RO(waiting_for_supplier);
1159*4882a593Smuzhiyun
1160*4882a593Smuzhiyun /**
1161*4882a593Smuzhiyun * device_links_driver_bound - Update device links after probing its driver.
1162*4882a593Smuzhiyun * @dev: Device to update the links for.
1163*4882a593Smuzhiyun *
1164*4882a593Smuzhiyun * The probe has been successful, so update links from this device to any
1165*4882a593Smuzhiyun * consumers by changing their status to "available".
1166*4882a593Smuzhiyun *
1167*4882a593Smuzhiyun * Also change the status of @dev's links to suppliers to "active".
1168*4882a593Smuzhiyun *
1169*4882a593Smuzhiyun * Links without the DL_FLAG_MANAGED flag set are ignored.
1170*4882a593Smuzhiyun */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link, *ln;
	LIST_HEAD(sync_list);

	/*
	 * If a device binds successfully, it's expected to have created all
	 * the device links it needs to or make new device links as it needs
	 * them. So, fw_devlink no longer needs to create device links to any
	 * of the device's suppliers.
	 *
	 * Also, if a child firmware node of this bound device is not added as
	 * a device by now, assume it is never going to be added and make sure
	 * other devices don't defer probe indefinitely by waiting for such a
	 * child device.
	 */
	if (dev->fwnode && dev->fwnode->dev == dev) {
		struct fwnode_handle *child;
		fwnode_links_purge_suppliers(dev->fwnode);
		fwnode_for_each_available_child_node(dev->fwnode, child)
			fw_devlink_purge_absent_suppliers(child);
	}
	/* No longer waiting for suppliers - retire the sysfs attribute. */
	device_remove_file(dev, &dev_attr_waiting_for_supplier);

	device_links_write_lock();

	/* Consumers of this device may now see their supplier as available. */
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * Links created during consumer probe may be in the "consumer
		 * probe" state to start with if the supplier is still probing
		 * when they are created and they may become "active" if the
		 * consumer probe returns first. Skip them here.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

		/* Re-try consumers that were waiting for this supplier. */
		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
			driver_deferred_probe_add(link->consumer);
	}

	if (defer_sync_state_count)
		__device_links_supplier_defer_sync(dev);
	else
		__device_links_queue_sync_state(dev, &sync_list);

	/* Promote this device's supplier links from "consumer probe". */
	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		struct device *supplier;

		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		supplier = link->supplier;
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
			/*
			 * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
			 * other DL_MANAGED_LINK_FLAGS have been set. So, it's
			 * safe to drop the managed link completely.
			 */
			device_link_drop_managed(link);
		} else {
			WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
			WRITE_ONCE(link->status, DL_STATE_ACTIVE);
		}

		/*
		 * This needs to be done even for the deleted
		 * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
		 * device link that was preventing the supplier from getting a
		 * sync_state() call.
		 */
		if (defer_sync_state_count)
			__device_links_supplier_defer_sync(supplier);
		else
			__device_links_queue_sync_state(supplier, &sync_list);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();

	/* Run the queued sync_state() callbacks outside the write lock. */
	device_links_flush_sync_list(&sync_list, dev);
}
1260*4882a593Smuzhiyun
1261*4882a593Smuzhiyun /**
1262*4882a593Smuzhiyun * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
1264*4882a593Smuzhiyun *
1265*4882a593Smuzhiyun * Delete all non-persistent links from this device to any suppliers.
1266*4882a593Smuzhiyun *
1267*4882a593Smuzhiyun * Persistent links stay around, but their status is changed to "available",
1268*4882a593Smuzhiyun * unless they already are in the "supplier unbind in progress" state in which
1269*4882a593Smuzhiyun * case they need not be updated.
1270*4882a593Smuzhiyun *
1271*4882a593Smuzhiyun * Links without the DL_FLAG_MANAGED flag set are ignored.
1272*4882a593Smuzhiyun */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	/*
	 * Walk the supplier links in reverse; the "safe" variant is required
	 * because device_link_drop_managed() may delete the current entry.
	 */
	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		/* Only links managed by the driver core are updated here. */
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/* Non-persistent links go away when the consumer loses its driver. */
		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
			device_link_drop_managed(link);
			continue;
		}

		/*
		 * Only links activated by this consumer's probe need a status
		 * update; others are left alone.
		 */
		if (link->status != DL_STATE_CONSUMER_PROBE &&
		    link->status != DL_STATE_ACTIVE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			/* Supplier still bound: link can be used again later. */
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			/*
			 * A probe-time link to an unbound supplier is only
			 * expected for SYNC_STATE_ONLY links, which do not
			 * enforce probe ordering.
			 */
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}
1300*4882a593Smuzhiyun
1301*4882a593Smuzhiyun /**
1302*4882a593Smuzhiyun * device_links_no_driver - Update links after failing driver probe.
1303*4882a593Smuzhiyun * @dev: Device whose driver has just failed to probe.
1304*4882a593Smuzhiyun *
1305*4882a593Smuzhiyun * Clean up leftover links to consumers for @dev and invoke
1306*4882a593Smuzhiyun * %__device_links_no_driver() to update links to suppliers for it as
1307*4882a593Smuzhiyun * appropriate.
1308*4882a593Smuzhiyun *
1309*4882a593Smuzhiyun * Links without the DL_FLAG_MANAGED flag set are ignored.
1310*4882a593Smuzhiyun */
device_links_no_driver(struct device * dev)1311*4882a593Smuzhiyun void device_links_no_driver(struct device *dev)
1312*4882a593Smuzhiyun {
1313*4882a593Smuzhiyun struct device_link *link;
1314*4882a593Smuzhiyun
1315*4882a593Smuzhiyun device_links_write_lock();
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun list_for_each_entry(link, &dev->links.consumers, s_node) {
1318*4882a593Smuzhiyun if (!(link->flags & DL_FLAG_MANAGED))
1319*4882a593Smuzhiyun continue;
1320*4882a593Smuzhiyun
1321*4882a593Smuzhiyun /*
1322*4882a593Smuzhiyun * The probe has failed, so if the status of the link is
1323*4882a593Smuzhiyun * "consumer probe" or "active", it must have been added by
1324*4882a593Smuzhiyun * a probing consumer while this device was still probing.
1325*4882a593Smuzhiyun * Change its state to "dormant", as it represents a valid
1326*4882a593Smuzhiyun * relationship, but it is not functionally meaningful.
1327*4882a593Smuzhiyun */
1328*4882a593Smuzhiyun if (link->status == DL_STATE_CONSUMER_PROBE ||
1329*4882a593Smuzhiyun link->status == DL_STATE_ACTIVE)
1330*4882a593Smuzhiyun WRITE_ONCE(link->status, DL_STATE_DORMANT);
1331*4882a593Smuzhiyun }
1332*4882a593Smuzhiyun
1333*4882a593Smuzhiyun __device_links_no_driver(dev);
1334*4882a593Smuzhiyun
1335*4882a593Smuzhiyun device_links_write_unlock();
1336*4882a593Smuzhiyun }
1337*4882a593Smuzhiyun
1338*4882a593Smuzhiyun /**
1339*4882a593Smuzhiyun * device_links_driver_cleanup - Update links after driver removal.
1340*4882a593Smuzhiyun * @dev: Device whose driver has just gone away.
1341*4882a593Smuzhiyun *
1342*4882a593Smuzhiyun * Update links to consumers for @dev by changing their status to "dormant" and
1343*4882a593Smuzhiyun * invoke %__device_links_no_driver() to update links to suppliers for it as
1344*4882a593Smuzhiyun * appropriate.
1345*4882a593Smuzhiyun *
1346*4882a593Smuzhiyun * Links without the DL_FLAG_MANAGED flag set are ignored.
1347*4882a593Smuzhiyun */
void device_links_driver_cleanup(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	/* Safe iteration: device_link_drop_managed() can remove entries. */
	list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * AUTOREMOVE_CONSUMER links are expected to have been dropped
		 * already, and device_links_busy() is expected to have moved
		 * the remaining managed consumer links to "supplier unbind".
		 */
		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);

		/*
		 * autoremove the links between this @dev and its consumer
		 * devices that are not active, i.e. where the link state
		 * has moved to DL_STATE_SUPPLIER_UNBIND.
		 */
		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
		    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
			device_link_drop_managed(link);

		/*
		 * NOTE(review): this store is reached even after
		 * device_link_drop_managed() above; it appears to rely on the
		 * link object only being freed after a grace period — confirm.
		 */
		WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	/* This device can no longer defer supplier sync_state() calls. */
	list_del_init(&dev->links.defer_sync);
	__device_links_no_driver(dev);

	device_links_write_unlock();
}
1378*4882a593Smuzhiyun
1379*4882a593Smuzhiyun /**
1380*4882a593Smuzhiyun * device_links_busy - Check if there are any busy links to consumers.
1381*4882a593Smuzhiyun * @dev: Device to check.
1382*4882a593Smuzhiyun *
1383*4882a593Smuzhiyun * Check each consumer of the device and return 'true' if its link's status
1384*4882a593Smuzhiyun * is one of "consumer probe" or "active" (meaning that the given consumer is
1385*4882a593Smuzhiyun * probing right now or its driver is present). Otherwise, change the link
1386*4882a593Smuzhiyun * state to "supplier unbind" to prevent the consumer from being probed
1387*4882a593Smuzhiyun * successfully going forward.
1388*4882a593Smuzhiyun *
1389*4882a593Smuzhiyun * Return 'false' if there are no probing or active consumers.
1390*4882a593Smuzhiyun *
1391*4882a593Smuzhiyun * Links without the DL_FLAG_MANAGED flag set are ignored.
1392*4882a593Smuzhiyun */
device_links_busy(struct device * dev)1393*4882a593Smuzhiyun bool device_links_busy(struct device *dev)
1394*4882a593Smuzhiyun {
1395*4882a593Smuzhiyun struct device_link *link;
1396*4882a593Smuzhiyun bool ret = false;
1397*4882a593Smuzhiyun
1398*4882a593Smuzhiyun device_links_write_lock();
1399*4882a593Smuzhiyun
1400*4882a593Smuzhiyun list_for_each_entry(link, &dev->links.consumers, s_node) {
1401*4882a593Smuzhiyun if (!(link->flags & DL_FLAG_MANAGED))
1402*4882a593Smuzhiyun continue;
1403*4882a593Smuzhiyun
1404*4882a593Smuzhiyun if (link->status == DL_STATE_CONSUMER_PROBE
1405*4882a593Smuzhiyun || link->status == DL_STATE_ACTIVE) {
1406*4882a593Smuzhiyun ret = true;
1407*4882a593Smuzhiyun break;
1408*4882a593Smuzhiyun }
1409*4882a593Smuzhiyun WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
1410*4882a593Smuzhiyun }
1411*4882a593Smuzhiyun
1412*4882a593Smuzhiyun dev->links.status = DL_DEV_UNBINDING;
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun device_links_write_unlock();
1415*4882a593Smuzhiyun return ret;
1416*4882a593Smuzhiyun }
1417*4882a593Smuzhiyun
1418*4882a593Smuzhiyun /**
1419*4882a593Smuzhiyun * device_links_unbind_consumers - Force unbind consumers of the given device.
1420*4882a593Smuzhiyun * @dev: Device to unbind the consumers of.
1421*4882a593Smuzhiyun *
1422*4882a593Smuzhiyun * Walk the list of links to consumers for @dev and if any of them is in the
1423*4882a593Smuzhiyun * "consumer probe" state, wait for all device probes in progress to complete
1424*4882a593Smuzhiyun * and start over.
1425*4882a593Smuzhiyun *
1426*4882a593Smuzhiyun * If that's not the case, change the status of the link to "supplier unbind"
1427*4882a593Smuzhiyun * and check if the link was in the "active" state. If so, force the consumer
1428*4882a593Smuzhiyun * driver to unbind and start over (the consumer will not re-probe as we have
1429*4882a593Smuzhiyun * changed the state of the link already).
1430*4882a593Smuzhiyun *
1431*4882a593Smuzhiyun * Links without the DL_FLAG_MANAGED flag set are ignored.
1432*4882a593Smuzhiyun */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

 start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		/*
		 * SYNC_STATE_ONLY links don't block probing or unbinding, so
		 * their consumers need not be forcibly unbound here.
		 */
		if (!(link->flags & DL_FLAG_MANAGED) ||
		    link->flags & DL_FLAG_SYNC_STATE_ONLY)
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			/*
			 * The consumer is probing right now: drop the lock,
			 * wait for all probes in progress to complete, and
			 * restart the scan (the list may have changed).
			 */
			device_links_write_unlock();

			wait_for_device_probe();
			goto start;
		}
		/*
		 * Mark the link first, so the consumer cannot re-probe
		 * successfully once its driver is released below.
		 */
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			/* Hold a reference across the unlocked unbind. */
			get_device(consumer);

			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			/* The lock was dropped: rescan from the beginning. */
			goto start;
		}
	}

	device_links_write_unlock();
}
1471*4882a593Smuzhiyun
1472*4882a593Smuzhiyun /**
1473*4882a593Smuzhiyun * device_links_purge - Delete existing links to other devices.
1474*4882a593Smuzhiyun * @dev: Target device.
1475*4882a593Smuzhiyun */
device_links_purge(struct device * dev)1476*4882a593Smuzhiyun static void device_links_purge(struct device *dev)
1477*4882a593Smuzhiyun {
1478*4882a593Smuzhiyun struct device_link *link, *ln;
1479*4882a593Smuzhiyun
1480*4882a593Smuzhiyun if (dev->class == &devlink_class)
1481*4882a593Smuzhiyun return;
1482*4882a593Smuzhiyun
1483*4882a593Smuzhiyun /*
1484*4882a593Smuzhiyun * Delete all of the remaining links from this device to any other
1485*4882a593Smuzhiyun * devices (either consumers or suppliers).
1486*4882a593Smuzhiyun */
1487*4882a593Smuzhiyun device_links_write_lock();
1488*4882a593Smuzhiyun
1489*4882a593Smuzhiyun list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
1490*4882a593Smuzhiyun WARN_ON(link->status == DL_STATE_ACTIVE);
1491*4882a593Smuzhiyun __device_link_del(&link->kref);
1492*4882a593Smuzhiyun }
1493*4882a593Smuzhiyun
1494*4882a593Smuzhiyun list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
1495*4882a593Smuzhiyun WARN_ON(link->status != DL_STATE_DORMANT &&
1496*4882a593Smuzhiyun link->status != DL_STATE_NONE);
1497*4882a593Smuzhiyun __device_link_del(&link->kref);
1498*4882a593Smuzhiyun }
1499*4882a593Smuzhiyun
1500*4882a593Smuzhiyun device_links_write_unlock();
1501*4882a593Smuzhiyun }
1502*4882a593Smuzhiyun
1503*4882a593Smuzhiyun #define FW_DEVLINK_FLAGS_PERMISSIVE (DL_FLAG_INFERRED | \
1504*4882a593Smuzhiyun DL_FLAG_SYNC_STATE_ONLY)
1505*4882a593Smuzhiyun #define FW_DEVLINK_FLAGS_ON (DL_FLAG_INFERRED | \
1506*4882a593Smuzhiyun DL_FLAG_AUTOPROBE_CONSUMER)
1507*4882a593Smuzhiyun #define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \
1508*4882a593Smuzhiyun DL_FLAG_PM_RUNTIME)
1509*4882a593Smuzhiyun
1510*4882a593Smuzhiyun static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
fw_devlink_setup(char * arg)1511*4882a593Smuzhiyun static int __init fw_devlink_setup(char *arg)
1512*4882a593Smuzhiyun {
1513*4882a593Smuzhiyun if (!arg)
1514*4882a593Smuzhiyun return -EINVAL;
1515*4882a593Smuzhiyun
1516*4882a593Smuzhiyun if (strcmp(arg, "off") == 0) {
1517*4882a593Smuzhiyun fw_devlink_flags = 0;
1518*4882a593Smuzhiyun } else if (strcmp(arg, "permissive") == 0) {
1519*4882a593Smuzhiyun fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
1520*4882a593Smuzhiyun } else if (strcmp(arg, "on") == 0) {
1521*4882a593Smuzhiyun fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
1522*4882a593Smuzhiyun } else if (strcmp(arg, "rpm") == 0) {
1523*4882a593Smuzhiyun fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
1524*4882a593Smuzhiyun }
1525*4882a593Smuzhiyun return 0;
1526*4882a593Smuzhiyun }
1527*4882a593Smuzhiyun early_param("fw_devlink", fw_devlink_setup);
1528*4882a593Smuzhiyun
1529*4882a593Smuzhiyun static bool fw_devlink_strict = true;
/* Parse "fw_devlink.strict="; accepts the boolean spellings of strtobool(). */
static int __init fw_devlink_strict_setup(char *arg)
{
	return strtobool(arg, &fw_devlink_strict);
}
1534*4882a593Smuzhiyun early_param("fw_devlink.strict", fw_devlink_strict_setup);
1535*4882a593Smuzhiyun
/* Return the device link flags fw_devlink is currently configured to use. */
u32 fw_devlink_get_flags(void)
{
	return fw_devlink_flags;
}
1540*4882a593Smuzhiyun
/* True if fw_devlink is in "permissive" mode (SYNC_STATE_ONLY links only). */
static bool fw_devlink_is_permissive(void)
{
	return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
}
1545*4882a593Smuzhiyun
fw_devlink_is_strict(void)1546*4882a593Smuzhiyun bool fw_devlink_is_strict(void)
1547*4882a593Smuzhiyun {
1548*4882a593Smuzhiyun return fw_devlink_strict && !fw_devlink_is_permissive();
1549*4882a593Smuzhiyun }
1550*4882a593Smuzhiyun
fw_devlink_parse_fwnode(struct fwnode_handle * fwnode)1551*4882a593Smuzhiyun static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
1552*4882a593Smuzhiyun {
1553*4882a593Smuzhiyun if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
1554*4882a593Smuzhiyun return;
1555*4882a593Smuzhiyun
1556*4882a593Smuzhiyun fwnode_call_int_op(fwnode, add_links);
1557*4882a593Smuzhiyun fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
1558*4882a593Smuzhiyun }
1559*4882a593Smuzhiyun
fw_devlink_parse_fwtree(struct fwnode_handle * fwnode)1560*4882a593Smuzhiyun static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
1561*4882a593Smuzhiyun {
1562*4882a593Smuzhiyun struct fwnode_handle *child = NULL;
1563*4882a593Smuzhiyun
1564*4882a593Smuzhiyun fw_devlink_parse_fwnode(fwnode);
1565*4882a593Smuzhiyun
1566*4882a593Smuzhiyun while ((child = fwnode_get_next_available_child_node(fwnode, child)))
1567*4882a593Smuzhiyun fw_devlink_parse_fwtree(child);
1568*4882a593Smuzhiyun }
1569*4882a593Smuzhiyun
1570*4882a593Smuzhiyun /**
1571*4882a593Smuzhiyun * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
1572*4882a593Smuzhiyun * @con: Device to check dependencies for.
1573*4882a593Smuzhiyun * @sup: Device to check against.
1574*4882a593Smuzhiyun *
1575*4882a593Smuzhiyun * Check if @sup depends on @con or any device dependent on it (its child or
1576*4882a593Smuzhiyun * its consumer etc). When such a cyclic dependency is found, convert all
1577*4882a593Smuzhiyun * device links created solely by fw_devlink into SYNC_STATE_ONLY device links.
1578*4882a593Smuzhiyun * This is the equivalent of doing fw_devlink=permissive just between the
1579*4882a593Smuzhiyun * devices in the cycle. We need to do this because, at this point, fw_devlink
1580*4882a593Smuzhiyun * can't tell which of these dependencies is not a real dependency.
1581*4882a593Smuzhiyun *
1582*4882a593Smuzhiyun * Return 1 if a cycle is found. Otherwise, return 0.
1583*4882a593Smuzhiyun */
int fw_devlink_relax_cycle(struct device *con, void *sup)
{
	struct device_link *link;
	int ret;

	/* Found @sup in the dependency tree rooted at the original consumer. */
	if (con == sup)
		return 1;

	/* Recurse into the child devices of @con (they depend on @con). */
	ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
	if (ret)
		return ret;

	/* Recurse into consumers of @con reachable through device links. */
	list_for_each_entry(link, &con->links.consumers, s_node) {
		/*
		 * Links that (ignoring INFERRED) are already just
		 * SYNC_STATE_ONLY | MANAGED are already relaxed; skip them.
		 */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (!fw_devlink_relax_cycle(link->consumer, sup))
			continue;

		/*
		 * A cycle was found through this link.  Record that, but keep
		 * scanning the remaining consumers so that every fw_devlink
		 * link in the cycle gets relaxed.
		 */
		ret = 1;

		/* Only links inferred by fw_devlink may be relaxed. */
		if (!(link->flags & DL_FLAG_INFERRED))
			continue;

		/* Drop the runtime-PM coupling before weakening the link. */
		pm_runtime_drop_link(link);
		link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
		dev_dbg(link->consumer, "Relaxing link with %s\n",
			dev_name(link->supplier));
	}
	return ret;
}
1616*4882a593Smuzhiyun
1617*4882a593Smuzhiyun /**
1618*4882a593Smuzhiyun * fw_devlink_create_devlink - Create a device link from a consumer to fwnode
 * @con: Consumer device for the device link
 * @sup_handle: fwnode handle of supplier
1621*4882a593Smuzhiyun *
1622*4882a593Smuzhiyun * This function will try to create a device link between the consumer device
1623*4882a593Smuzhiyun * @con and the supplier device represented by @sup_handle.
1624*4882a593Smuzhiyun *
1625*4882a593Smuzhiyun * The supplier has to be provided as a fwnode because incorrect cycles in
1626*4882a593Smuzhiyun * fwnode links can sometimes cause the supplier device to never be created.
1627*4882a593Smuzhiyun * This function detects such cases and returns an error if it cannot create a
1628*4882a593Smuzhiyun * device link from the consumer to a missing supplier.
1629*4882a593Smuzhiyun *
1630*4882a593Smuzhiyun * Returns,
1631*4882a593Smuzhiyun * 0 on successfully creating a device link
1632*4882a593Smuzhiyun * -EINVAL if the device link cannot be created as expected
1633*4882a593Smuzhiyun * -EAGAIN if the device link cannot be created right now, but it may be
1634*4882a593Smuzhiyun * possible to do that in the future
1635*4882a593Smuzhiyun */
static int fw_devlink_create_devlink(struct device *con,
				     struct fwnode_handle *sup_handle, u32 flags)
{
	struct device *sup_dev;
	int ret = 0;

	/* Reference (if any) is dropped at "out"; put_device(NULL) is a no-op. */
	sup_dev = get_dev_from_fwnode(sup_handle);
	if (sup_dev) {
		/*
		 * If it's one of those drivers that don't actually bind to
		 * their device using driver core, then don't wait on this
		 * supplier device indefinitely.
		 */
		if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
		    sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
			ret = -EINVAL;
			goto out;
		}

		/*
		 * If this fails, it is due to cycles in device links. Just
		 * give up on this link and treat it as invalid.
		 */
		if (!device_link_add(con, sup_dev, flags) &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			dev_info(con, "Fixing up cyclic dependency with %s\n",
				 dev_name(sup_dev));
			device_links_write_lock();
			fw_devlink_relax_cycle(con, sup_dev);
			device_links_write_unlock();
			/*
			 * Retry with relaxed (SYNC_STATE_ONLY) flags so
			 * sync_state() bookkeeping stays correct, but still
			 * report the originally requested link as invalid.
			 */
			device_link_add(con, sup_dev,
					FW_DEVLINK_FLAGS_PERMISSIVE);
			ret = -EINVAL;
		}

		goto out;
	}

	/* Supplier that's already initialized without a struct device. */
	if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
		return -EINVAL;

	/*
	 * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports
	 * cycles. So cycle detection isn't necessary and shouldn't be
	 * done.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY)
		return -EAGAIN;

	/*
	 * If we can't find the supplier device from its fwnode, it might be
	 * due to a cyclic dependency between fwnodes. Some of these cycles can
	 * be broken by applying logic. Check for these types of cycles and
	 * break them so that devices in the cycle probe properly.
	 *
	 * If the supplier's parent is dependent on the consumer, then the
	 * consumer and supplier have a cyclic dependency. Since fw_devlink
	 * can't tell which of the inferred dependencies are incorrect, don't
	 * enforce probe ordering between any of the devices in this cyclic
	 * dependency. Do this by relaxing all the fw_devlink device links in
	 * this cycle and by treating the fwnode link between the consumer and
	 * the supplier as an invalid dependency.
	 */
	sup_dev = fwnode_get_next_parent_dev(sup_handle);
	if (sup_dev && device_is_dependent(con, sup_dev)) {
		dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
			 sup_handle, dev_name(sup_dev));
		device_links_write_lock();
		fw_devlink_relax_cycle(con, sup_dev);
		device_links_write_unlock();
		ret = -EINVAL;
	} else {
		/*
		 * Can't check for cycles or no cycles. So let's try
		 * again later.
		 */
		ret = -EAGAIN;
	}

out:
	put_device(sup_dev);
	return ret;
}
1720*4882a593Smuzhiyun
1721*4882a593Smuzhiyun /**
1722*4882a593Smuzhiyun * __fw_devlink_link_to_consumers - Create device links to consumers of a device
 * @dev: Device that needs to be linked to its consumers
1724*4882a593Smuzhiyun *
1725*4882a593Smuzhiyun * This function looks at all the consumer fwnodes of @dev and creates device
1726*4882a593Smuzhiyun * links between the consumer device and @dev (supplier).
1727*4882a593Smuzhiyun *
1728*4882a593Smuzhiyun * If the consumer device has not been added yet, then this function creates a
1729*4882a593Smuzhiyun * SYNC_STATE_ONLY link between @dev (supplier) and the closest ancestor device
1730*4882a593Smuzhiyun * of the consumer fwnode. This is necessary to make sure @dev doesn't get a
1731*4882a593Smuzhiyun * sync_state() callback before the real consumer device gets to be added and
1732*4882a593Smuzhiyun * then probed.
1733*4882a593Smuzhiyun *
1734*4882a593Smuzhiyun * Once device links are created from the real consumer to @dev (supplier), the
1735*4882a593Smuzhiyun * fwnode links are deleted.
1736*4882a593Smuzhiyun */
static void __fw_devlink_link_to_consumers(struct device *dev)
{
	struct fwnode_handle *fwnode = dev->fwnode;
	struct fwnode_link *link, *tmp;

	/* Safe iteration: converted fwnode links are deleted inside the loop. */
	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
		u32 dl_flags = fw_devlink_get_flags();
		struct device *con_dev;
		bool own_link = true;	/* link goes to the real consumer? */
		int ret;

		con_dev = get_dev_from_fwnode(link->consumer);
		/*
		 * If consumer device is not available yet, make a "proxy"
		 * SYNC_STATE_ONLY link from the consumer's parent device to
		 * the supplier device. This is necessary to make sure the
		 * supplier doesn't get a sync_state() callback before the real
		 * consumer can create a device link to the supplier.
		 *
		 * This proxy link step is needed to handle the case where the
		 * consumer's parent device is added before the supplier.
		 */
		if (!con_dev) {
			con_dev = fwnode_get_next_parent_dev(link->consumer);
			/*
			 * However, if the consumer's parent device is also the
			 * parent of the supplier, don't create a
			 * consumer-supplier link from the parent to its child
			 * device. Such a dependency is impossible.
			 */
			if (con_dev &&
			    fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) {
				put_device(con_dev);
				con_dev = NULL;
			} else {
				own_link = false;
				dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
			}
		}

		if (!con_dev)
			continue;

		ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags);
		put_device(con_dev);
		/*
		 * Only delete the fwnode link when the real consumer was
		 * linked; proxy links and -EAGAIN cases must be retried later.
		 */
		if (!own_link || ret == -EAGAIN)
			continue;

		list_del(&link->s_hook);
		list_del(&link->c_hook);
		kfree(link);
	}
}
1790*4882a593Smuzhiyun
1791*4882a593Smuzhiyun /**
1792*4882a593Smuzhiyun * __fw_devlink_link_to_suppliers - Create device links to suppliers of a device
 * @dev: The consumer device that needs to be linked to its suppliers
 * @fwnode: Root of the fwnode tree that is used to create device links
1795*4882a593Smuzhiyun *
1796*4882a593Smuzhiyun * This function looks at all the supplier fwnodes of fwnode tree rooted at
1797*4882a593Smuzhiyun * @fwnode and creates device links between @dev (consumer) and all the
1798*4882a593Smuzhiyun * supplier devices of the entire fwnode tree at @fwnode.
1799*4882a593Smuzhiyun *
1800*4882a593Smuzhiyun * The function creates normal (non-SYNC_STATE_ONLY) device links between @dev
1801*4882a593Smuzhiyun * and the real suppliers of @dev. Once these device links are created, the
1802*4882a593Smuzhiyun * fwnode links are deleted. When such device links are successfully created,
1803*4882a593Smuzhiyun * this function is called recursively on those supplier devices. This is
1804*4882a593Smuzhiyun * needed to detect and break some invalid cycles in fwnode links. See
1805*4882a593Smuzhiyun * fw_devlink_create_devlink() for more details.
1806*4882a593Smuzhiyun *
1807*4882a593Smuzhiyun * In addition, it also looks at all the suppliers of the entire fwnode tree
1808*4882a593Smuzhiyun * because some of the child devices of @dev that have not been added yet
1809*4882a593Smuzhiyun * (because @dev hasn't probed) might already have their suppliers added to
1810*4882a593Smuzhiyun * driver core. So, this function creates SYNC_STATE_ONLY device links between
1811*4882a593Smuzhiyun * @dev (consumer) and these suppliers to make sure they don't execute their
1812*4882a593Smuzhiyun * sync_state() callbacks before these child devices have a chance to create
1813*4882a593Smuzhiyun * their device links. The fwnode links that correspond to the child devices
 * aren't deleted because they are needed later to create the device links
1815*4882a593Smuzhiyun * between the real consumer and supplier devices.
1816*4882a593Smuzhiyun */
static void __fw_devlink_link_to_suppliers(struct device *dev,
					   struct fwnode_handle *fwnode)
{
	bool own_link = (dev->fwnode == fwnode);
	struct fwnode_link *link, *tmp;
	struct fwnode_handle *child = NULL;
	u32 dl_flags;

	/*
	 * Links made on behalf of @dev itself get the configured flags;
	 * "proxy" links made for descendant fwnodes are SYNC_STATE_ONLY.
	 */
	if (own_link)
		dl_flags = fw_devlink_get_flags();
	else
		dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;

	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
		int ret;
		struct device *sup_dev;
		struct fwnode_handle *sup = link->supplier;

		ret = fw_devlink_create_devlink(dev, sup, dl_flags);
		/* Keep the fwnode link for proxies and for -EAGAIN retries. */
		if (!own_link || ret == -EAGAIN)
			continue;

		list_del(&link->s_hook);
		list_del(&link->c_hook);
		kfree(link);

		/* If no device link was created, nothing more to do. */
		if (ret)
			continue;

		/*
		 * If a device link was successfully created to a supplier, we
		 * now need to try and link the supplier to all its suppliers.
		 *
		 * This is needed to detect and delete false dependencies in
		 * fwnode links that haven't been converted to a device link
		 * yet. See comments in fw_devlink_create_devlink() for more
		 * details on the false dependency.
		 *
		 * Without deleting these false dependencies, some devices will
		 * never probe because they'll keep waiting for their false
		 * dependency fwnode links to be converted to device links.
		 */
		sup_dev = get_dev_from_fwnode(sup);
		/*
		 * NOTE(review): ret == 0 means a device link was just created,
		 * so the supplier device should exist here — confirm sup_dev
		 * cannot be NULL under concurrent device removal.
		 */
		__fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode);
		put_device(sup_dev);
	}

	/*
	 * Make "proxy" SYNC_STATE_ONLY device links to represent the needs of
	 * all the descendants. This proxy link step is needed to handle the
	 * case where the supplier is added before the consumer's parent device
	 * (@dev).
	 */
	while ((child = fwnode_get_next_available_child_node(fwnode, child)))
		__fw_devlink_link_to_suppliers(dev, child);
}
1874*4882a593Smuzhiyun
fw_devlink_link_device(struct device * dev)1875*4882a593Smuzhiyun static void fw_devlink_link_device(struct device *dev)
1876*4882a593Smuzhiyun {
1877*4882a593Smuzhiyun struct fwnode_handle *fwnode = dev->fwnode;
1878*4882a593Smuzhiyun
1879*4882a593Smuzhiyun if (!fw_devlink_flags)
1880*4882a593Smuzhiyun return;
1881*4882a593Smuzhiyun
1882*4882a593Smuzhiyun fw_devlink_parse_fwtree(fwnode);
1883*4882a593Smuzhiyun
1884*4882a593Smuzhiyun mutex_lock(&fwnode_link_lock);
1885*4882a593Smuzhiyun __fw_devlink_link_to_consumers(dev);
1886*4882a593Smuzhiyun __fw_devlink_link_to_suppliers(dev, fwnode);
1887*4882a593Smuzhiyun mutex_unlock(&fwnode_link_lock);
1888*4882a593Smuzhiyun }
1889*4882a593Smuzhiyun
/* Device links support end. */

/*
 * Optional legacy platform hooks, invoked from device_platform_notify() on
 * device add/remove; NULL unless a platform installs them.
 */
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
/*
 * kobjects for the dev/char/block sysfs hierarchy — presumably backing
 * /sys/dev; confirm at their creation site (not visible in this chunk).
 */
static struct kobject *dev_kobj;
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;

/* Serializes device hotplug operations; taken via lock_device_hotplug(). */
static DEFINE_MUTEX(device_hotplug_lock);
1899*4882a593Smuzhiyun
/* Acquire the global device hotplug lock (blocking). */
void lock_device_hotplug(void)
{
	mutex_lock(&device_hotplug_lock);
}
1904*4882a593Smuzhiyun
/* Release the global device hotplug lock. */
void unlock_device_hotplug(void)
{
	mutex_unlock(&device_hotplug_lock);
}
1909*4882a593Smuzhiyun
lock_device_hotplug_sysfs(void)1910*4882a593Smuzhiyun int lock_device_hotplug_sysfs(void)
1911*4882a593Smuzhiyun {
1912*4882a593Smuzhiyun if (mutex_trylock(&device_hotplug_lock))
1913*4882a593Smuzhiyun return 0;
1914*4882a593Smuzhiyun
1915*4882a593Smuzhiyun /* Avoid busy looping (5 ms of sleep should do). */
1916*4882a593Smuzhiyun msleep(5);
1917*4882a593Smuzhiyun return restart_syscall();
1918*4882a593Smuzhiyun }
1919*4882a593Smuzhiyun
#ifdef CONFIG_BLOCK
/* Nonzero unless @dev is a block-device partition. */
static inline int device_is_not_partition(struct device *dev)
{
	return dev->type != &part_type;
}
#else
/* Without CONFIG_BLOCK no device can be a partition. */
static inline int device_is_not_partition(struct device *dev)
{
	return 1;
}
#endif
1931*4882a593Smuzhiyun
1932*4882a593Smuzhiyun static int
device_platform_notify(struct device * dev,enum kobject_action action)1933*4882a593Smuzhiyun device_platform_notify(struct device *dev, enum kobject_action action)
1934*4882a593Smuzhiyun {
1935*4882a593Smuzhiyun int ret;
1936*4882a593Smuzhiyun
1937*4882a593Smuzhiyun ret = acpi_platform_notify(dev, action);
1938*4882a593Smuzhiyun if (ret)
1939*4882a593Smuzhiyun return ret;
1940*4882a593Smuzhiyun
1941*4882a593Smuzhiyun ret = software_node_notify(dev, action);
1942*4882a593Smuzhiyun if (ret)
1943*4882a593Smuzhiyun return ret;
1944*4882a593Smuzhiyun
1945*4882a593Smuzhiyun if (platform_notify && action == KOBJ_ADD)
1946*4882a593Smuzhiyun platform_notify(dev);
1947*4882a593Smuzhiyun else if (platform_notify_remove && action == KOBJ_REMOVE)
1948*4882a593Smuzhiyun platform_notify_remove(dev);
1949*4882a593Smuzhiyun return 0;
1950*4882a593Smuzhiyun }
1951*4882a593Smuzhiyun
1952*4882a593Smuzhiyun /**
1953*4882a593Smuzhiyun * dev_driver_string - Return a device's driver name, if at all possible
1954*4882a593Smuzhiyun * @dev: struct device to get the name of
1955*4882a593Smuzhiyun *
1956*4882a593Smuzhiyun * Will return the device's driver's name if it is bound to a device. If
1957*4882a593Smuzhiyun * the device is not bound to a driver, it will return the name of the bus
1958*4882a593Smuzhiyun * it is attached to. If it is not attached to a bus either, an empty
1959*4882a593Smuzhiyun * string will be returned.
1960*4882a593Smuzhiyun */
dev_driver_string(const struct device * dev)1961*4882a593Smuzhiyun const char *dev_driver_string(const struct device *dev)
1962*4882a593Smuzhiyun {
1963*4882a593Smuzhiyun struct device_driver *drv;
1964*4882a593Smuzhiyun
1965*4882a593Smuzhiyun /* dev->driver can change to NULL underneath us because of unbinding,
1966*4882a593Smuzhiyun * so be careful about accessing it. dev->bus and dev->class should
1967*4882a593Smuzhiyun * never change once they are set, so they don't need special care.
1968*4882a593Smuzhiyun */
1969*4882a593Smuzhiyun drv = READ_ONCE(dev->driver);
1970*4882a593Smuzhiyun return drv ? drv->name : dev_bus_name(dev);
1971*4882a593Smuzhiyun }
1972*4882a593Smuzhiyun EXPORT_SYMBOL(dev_driver_string);
1973*4882a593Smuzhiyun
1974*4882a593Smuzhiyun #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
1975*4882a593Smuzhiyun
dev_attr_show(struct kobject * kobj,struct attribute * attr,char * buf)1976*4882a593Smuzhiyun static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
1977*4882a593Smuzhiyun char *buf)
1978*4882a593Smuzhiyun {
1979*4882a593Smuzhiyun struct device_attribute *dev_attr = to_dev_attr(attr);
1980*4882a593Smuzhiyun struct device *dev = kobj_to_dev(kobj);
1981*4882a593Smuzhiyun ssize_t ret = -EIO;
1982*4882a593Smuzhiyun
1983*4882a593Smuzhiyun if (dev_attr->show)
1984*4882a593Smuzhiyun ret = dev_attr->show(dev, dev_attr, buf);
1985*4882a593Smuzhiyun if (ret >= (ssize_t)PAGE_SIZE) {
1986*4882a593Smuzhiyun printk("dev_attr_show: %pS returned bad count\n",
1987*4882a593Smuzhiyun dev_attr->show);
1988*4882a593Smuzhiyun }
1989*4882a593Smuzhiyun return ret;
1990*4882a593Smuzhiyun }
1991*4882a593Smuzhiyun
dev_attr_store(struct kobject * kobj,struct attribute * attr,const char * buf,size_t count)1992*4882a593Smuzhiyun static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
1993*4882a593Smuzhiyun const char *buf, size_t count)
1994*4882a593Smuzhiyun {
1995*4882a593Smuzhiyun struct device_attribute *dev_attr = to_dev_attr(attr);
1996*4882a593Smuzhiyun struct device *dev = kobj_to_dev(kobj);
1997*4882a593Smuzhiyun ssize_t ret = -EIO;
1998*4882a593Smuzhiyun
1999*4882a593Smuzhiyun if (dev_attr->store)
2000*4882a593Smuzhiyun ret = dev_attr->store(dev, dev_attr, buf, count);
2001*4882a593Smuzhiyun return ret;
2002*4882a593Smuzhiyun }
2003*4882a593Smuzhiyun
/* sysfs ops routing attribute reads/writes to dev_attr_show()/dev_attr_store(). */
static const struct sysfs_ops dev_sysfs_ops = {
	.show = dev_attr_show,
	.store = dev_attr_store,
};
2008*4882a593Smuzhiyun
2009*4882a593Smuzhiyun #define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
2010*4882a593Smuzhiyun
device_store_ulong(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)2011*4882a593Smuzhiyun ssize_t device_store_ulong(struct device *dev,
2012*4882a593Smuzhiyun struct device_attribute *attr,
2013*4882a593Smuzhiyun const char *buf, size_t size)
2014*4882a593Smuzhiyun {
2015*4882a593Smuzhiyun struct dev_ext_attribute *ea = to_ext_attr(attr);
2016*4882a593Smuzhiyun int ret;
2017*4882a593Smuzhiyun unsigned long new;
2018*4882a593Smuzhiyun
2019*4882a593Smuzhiyun ret = kstrtoul(buf, 0, &new);
2020*4882a593Smuzhiyun if (ret)
2021*4882a593Smuzhiyun return ret;
2022*4882a593Smuzhiyun *(unsigned long *)(ea->var) = new;
2023*4882a593Smuzhiyun /* Always return full write size even if we didn't consume all */
2024*4882a593Smuzhiyun return size;
2025*4882a593Smuzhiyun }
2026*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_store_ulong);
2027*4882a593Smuzhiyun
device_show_ulong(struct device * dev,struct device_attribute * attr,char * buf)2028*4882a593Smuzhiyun ssize_t device_show_ulong(struct device *dev,
2029*4882a593Smuzhiyun struct device_attribute *attr,
2030*4882a593Smuzhiyun char *buf)
2031*4882a593Smuzhiyun {
2032*4882a593Smuzhiyun struct dev_ext_attribute *ea = to_ext_attr(attr);
2033*4882a593Smuzhiyun return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
2034*4882a593Smuzhiyun }
2035*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_show_ulong);
2036*4882a593Smuzhiyun
device_store_int(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)2037*4882a593Smuzhiyun ssize_t device_store_int(struct device *dev,
2038*4882a593Smuzhiyun struct device_attribute *attr,
2039*4882a593Smuzhiyun const char *buf, size_t size)
2040*4882a593Smuzhiyun {
2041*4882a593Smuzhiyun struct dev_ext_attribute *ea = to_ext_attr(attr);
2042*4882a593Smuzhiyun int ret;
2043*4882a593Smuzhiyun long new;
2044*4882a593Smuzhiyun
2045*4882a593Smuzhiyun ret = kstrtol(buf, 0, &new);
2046*4882a593Smuzhiyun if (ret)
2047*4882a593Smuzhiyun return ret;
2048*4882a593Smuzhiyun
2049*4882a593Smuzhiyun if (new > INT_MAX || new < INT_MIN)
2050*4882a593Smuzhiyun return -EINVAL;
2051*4882a593Smuzhiyun *(int *)(ea->var) = new;
2052*4882a593Smuzhiyun /* Always return full write size even if we didn't consume all */
2053*4882a593Smuzhiyun return size;
2054*4882a593Smuzhiyun }
2055*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_store_int);
2056*4882a593Smuzhiyun
device_show_int(struct device * dev,struct device_attribute * attr,char * buf)2057*4882a593Smuzhiyun ssize_t device_show_int(struct device *dev,
2058*4882a593Smuzhiyun struct device_attribute *attr,
2059*4882a593Smuzhiyun char *buf)
2060*4882a593Smuzhiyun {
2061*4882a593Smuzhiyun struct dev_ext_attribute *ea = to_ext_attr(attr);
2062*4882a593Smuzhiyun
2063*4882a593Smuzhiyun return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
2064*4882a593Smuzhiyun }
2065*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_show_int);
2066*4882a593Smuzhiyun
device_store_bool(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)2067*4882a593Smuzhiyun ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
2068*4882a593Smuzhiyun const char *buf, size_t size)
2069*4882a593Smuzhiyun {
2070*4882a593Smuzhiyun struct dev_ext_attribute *ea = to_ext_attr(attr);
2071*4882a593Smuzhiyun
2072*4882a593Smuzhiyun if (strtobool(buf, ea->var) < 0)
2073*4882a593Smuzhiyun return -EINVAL;
2074*4882a593Smuzhiyun
2075*4882a593Smuzhiyun return size;
2076*4882a593Smuzhiyun }
2077*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_store_bool);
2078*4882a593Smuzhiyun
device_show_bool(struct device * dev,struct device_attribute * attr,char * buf)2079*4882a593Smuzhiyun ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
2080*4882a593Smuzhiyun char *buf)
2081*4882a593Smuzhiyun {
2082*4882a593Smuzhiyun struct dev_ext_attribute *ea = to_ext_attr(attr);
2083*4882a593Smuzhiyun
2084*4882a593Smuzhiyun return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
2085*4882a593Smuzhiyun }
2086*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_show_bool);
2087*4882a593Smuzhiyun
/**
 * device_release - free device structure.
 * @kobj: device's kobject.
 *
 * This is called once the reference count for the object
 * reaches 0. We forward the call to the device's release
 * method, which should handle actually freeing the structure.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * Some platform devices are driven without driver attached
	 * and managed resources may have been acquired. Make sure
	 * all resources are released.
	 *
	 * Drivers still can add resources into device after device
	 * is deleted but alive, so release devres here to avoid
	 * possible memory leak.
	 */
	devres_release_all(dev);

	kfree(dev->dma_range_map);

	/*
	 * Exactly one release callback runs, chosen in priority order:
	 * per-device, then device-type, then class. A device with none
	 * of them is a driver bug and triggers the WARN below.
	 */
	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
			dev_name(dev));
	kfree(p);
}
2125*4882a593Smuzhiyun
device_namespace(struct kobject * kobj)2126*4882a593Smuzhiyun static const void *device_namespace(struct kobject *kobj)
2127*4882a593Smuzhiyun {
2128*4882a593Smuzhiyun struct device *dev = kobj_to_dev(kobj);
2129*4882a593Smuzhiyun const void *ns = NULL;
2130*4882a593Smuzhiyun
2131*4882a593Smuzhiyun if (dev->class && dev->class->ns_type)
2132*4882a593Smuzhiyun ns = dev->class->namespace(dev);
2133*4882a593Smuzhiyun
2134*4882a593Smuzhiyun return ns;
2135*4882a593Smuzhiyun }
2136*4882a593Smuzhiyun
/*
 * kobject ownership hook: let the class override the sysfs uid/gid via its
 * get_ownership() callback; leave *uid/*gid untouched otherwise.
 */
static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct device *dev = kobj_to_dev(kobj);

	if (!dev->class || !dev->class->get_ownership)
		return;

	dev->class->get_ownership(dev, uid, gid);
}
2144*4882a593Smuzhiyun
/* Common kobj_type backing struct device kobjects. */
static struct kobj_type device_ktype = {
	.release = device_release,
	.sysfs_ops = &dev_sysfs_ops,
	.namespace = device_namespace,
	.get_ownership = device_get_ownership,
};
2151*4882a593Smuzhiyun
2152*4882a593Smuzhiyun
dev_uevent_filter(struct kset * kset,struct kobject * kobj)2153*4882a593Smuzhiyun static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
2154*4882a593Smuzhiyun {
2155*4882a593Smuzhiyun struct kobj_type *ktype = get_ktype(kobj);
2156*4882a593Smuzhiyun
2157*4882a593Smuzhiyun if (ktype == &device_ktype) {
2158*4882a593Smuzhiyun struct device *dev = kobj_to_dev(kobj);
2159*4882a593Smuzhiyun if (dev->bus)
2160*4882a593Smuzhiyun return 1;
2161*4882a593Smuzhiyun if (dev->class)
2162*4882a593Smuzhiyun return 1;
2163*4882a593Smuzhiyun }
2164*4882a593Smuzhiyun return 0;
2165*4882a593Smuzhiyun }
2166*4882a593Smuzhiyun
dev_uevent_name(struct kset * kset,struct kobject * kobj)2167*4882a593Smuzhiyun static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
2168*4882a593Smuzhiyun {
2169*4882a593Smuzhiyun struct device *dev = kobj_to_dev(kobj);
2170*4882a593Smuzhiyun
2171*4882a593Smuzhiyun if (dev->bus)
2172*4882a593Smuzhiyun return dev->bus->name;
2173*4882a593Smuzhiyun if (dev->class)
2174*4882a593Smuzhiyun return dev->class->name;
2175*4882a593Smuzhiyun return NULL;
2176*4882a593Smuzhiyun }
2177*4882a593Smuzhiyun
/*
 * dev_uevent - build the uevent environment for a device
 * @kset: owning kset (unused here)
 * @kobj: the device's kobject
 * @env:  environment buffer to fill
 *
 * Adds device-node variables (MAJOR/MINOR, DEVNAME, DEVMODE, DEVUID/DEVGID)
 * when the device has a devt, then DEVTYPE, DRIVER and DT properties, and
 * finally lets the bus, class and device type append their own variables.
 * Returns the last nonzero status from those callbacks, 0 otherwise.
 */
static int dev_uevent(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* add device node properties if present */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			/* Only report non-default mode/ownership. */
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			/* tmp holds any buffer device_get_devnode() allocated. */
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);

	/* Add common DT information about the device */
	of_device_uevent(dev, env);

	/* have the bus specific function add its stuff */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* have the class specific function add its stuff */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* have the device type specific function add its stuff */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}
2244*4882a593Smuzhiyun
/* uevent callbacks for the devices kset. */
static const struct kset_uevent_ops device_uevent_ops = {
	.filter = dev_uevent_filter,
	.name = dev_uevent_name,
	.uevent = dev_uevent,
};
2250*4882a593Smuzhiyun
/*
 * "uevent" attribute read: synthesize the environment the kernel would emit
 * for this device and print it, one KEY=value per line. An empty read means
 * the device has no uevent-capable kset, is filtered out, or the uevent
 * callback failed (errors other than -ENOMEM are deliberately reported as
 * an empty file, not as a read error).
 */
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	int len = 0;
	int retval;

	/* search the kset, the device belongs to */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* respect filter */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(kset, &dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* let the kset specific function add its keys */
	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
	if (retval)
		goto out;

	/* copy keys to file */
	for (i = 0; i < env->envp_idx; i++)
		len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
	/* kfree(NULL) is a no-op, so the pre-allocation paths are safe. */
	kfree(env);
	return len;
}
2293*4882a593Smuzhiyun
uevent_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2294*4882a593Smuzhiyun static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
2295*4882a593Smuzhiyun const char *buf, size_t count)
2296*4882a593Smuzhiyun {
2297*4882a593Smuzhiyun int rc;
2298*4882a593Smuzhiyun
2299*4882a593Smuzhiyun rc = kobject_synth_uevent(&dev->kobj, buf, count);
2300*4882a593Smuzhiyun
2301*4882a593Smuzhiyun if (rc) {
2302*4882a593Smuzhiyun dev_err(dev, "uevent: failed to send synthetic uevent\n");
2303*4882a593Smuzhiyun return rc;
2304*4882a593Smuzhiyun }
2305*4882a593Smuzhiyun
2306*4882a593Smuzhiyun return count;
2307*4882a593Smuzhiyun }
2308*4882a593Smuzhiyun static DEVICE_ATTR_RW(uevent);
2309*4882a593Smuzhiyun
online_show(struct device * dev,struct device_attribute * attr,char * buf)2310*4882a593Smuzhiyun static ssize_t online_show(struct device *dev, struct device_attribute *attr,
2311*4882a593Smuzhiyun char *buf)
2312*4882a593Smuzhiyun {
2313*4882a593Smuzhiyun bool val;
2314*4882a593Smuzhiyun
2315*4882a593Smuzhiyun device_lock(dev);
2316*4882a593Smuzhiyun val = !dev->offline;
2317*4882a593Smuzhiyun device_unlock(dev);
2318*4882a593Smuzhiyun return sysfs_emit(buf, "%u\n", val);
2319*4882a593Smuzhiyun }
2320*4882a593Smuzhiyun
online_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2321*4882a593Smuzhiyun static ssize_t online_store(struct device *dev, struct device_attribute *attr,
2322*4882a593Smuzhiyun const char *buf, size_t count)
2323*4882a593Smuzhiyun {
2324*4882a593Smuzhiyun bool val;
2325*4882a593Smuzhiyun int ret;
2326*4882a593Smuzhiyun
2327*4882a593Smuzhiyun ret = strtobool(buf, &val);
2328*4882a593Smuzhiyun if (ret < 0)
2329*4882a593Smuzhiyun return ret;
2330*4882a593Smuzhiyun
2331*4882a593Smuzhiyun ret = lock_device_hotplug_sysfs();
2332*4882a593Smuzhiyun if (ret)
2333*4882a593Smuzhiyun return ret;
2334*4882a593Smuzhiyun
2335*4882a593Smuzhiyun ret = val ? device_online(dev) : device_offline(dev);
2336*4882a593Smuzhiyun unlock_device_hotplug();
2337*4882a593Smuzhiyun return ret < 0 ? ret : count;
2338*4882a593Smuzhiyun }
2339*4882a593Smuzhiyun static DEVICE_ATTR_RW(online);
2340*4882a593Smuzhiyun
/* Create every sysfs group in the NULL-terminated @groups list for @dev. */
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);
2346*4882a593Smuzhiyun
/* Remove every sysfs group in the NULL-terminated @groups list from @dev. */
void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);
2353*4882a593Smuzhiyun
/*
 * devres payload for devm_device_add_group()/devm_device_add_groups():
 * holds either a single attribute group or a NULL-terminated group array so
 * the matching devres release callback can remove it again.
 */
union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};
2358*4882a593Smuzhiyun
devm_attr_group_match(struct device * dev,void * res,void * data)2359*4882a593Smuzhiyun static int devm_attr_group_match(struct device *dev, void *res, void *data)
2360*4882a593Smuzhiyun {
2361*4882a593Smuzhiyun return ((union device_attr_group_devres *)res)->group == data;
2362*4882a593Smuzhiyun }
2363*4882a593Smuzhiyun
devm_attr_group_remove(struct device * dev,void * res)2364*4882a593Smuzhiyun static void devm_attr_group_remove(struct device *dev, void *res)
2365*4882a593Smuzhiyun {
2366*4882a593Smuzhiyun union device_attr_group_devres *devres = res;
2367*4882a593Smuzhiyun const struct attribute_group *group = devres->group;
2368*4882a593Smuzhiyun
2369*4882a593Smuzhiyun dev_dbg(dev, "%s: removing group %p\n", __func__, group);
2370*4882a593Smuzhiyun sysfs_remove_group(&dev->kobj, group);
2371*4882a593Smuzhiyun }
2372*4882a593Smuzhiyun
devm_attr_groups_remove(struct device * dev,void * res)2373*4882a593Smuzhiyun static void devm_attr_groups_remove(struct device *dev, void *res)
2374*4882a593Smuzhiyun {
2375*4882a593Smuzhiyun union device_attr_group_devres *devres = res;
2376*4882a593Smuzhiyun const struct attribute_group **groups = devres->groups;
2377*4882a593Smuzhiyun
2378*4882a593Smuzhiyun dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
2379*4882a593Smuzhiyun sysfs_remove_groups(&dev->kobj, groups);
2380*4882a593Smuzhiyun }
2381*4882a593Smuzhiyun
2382*4882a593Smuzhiyun /**
2383*4882a593Smuzhiyun * devm_device_add_group - given a device, create a managed attribute group
2384*4882a593Smuzhiyun * @dev: The device to create the group for
2385*4882a593Smuzhiyun * @grp: The attribute group to create
2386*4882a593Smuzhiyun *
2387*4882a593Smuzhiyun * This function creates a group for the first time. It will explicitly
2388*4882a593Smuzhiyun * warn and error if any of the attribute files being created already exist.
2389*4882a593Smuzhiyun *
2390*4882a593Smuzhiyun * Returns 0 on success or error code on failure.
2391*4882a593Smuzhiyun */
devm_device_add_group(struct device * dev,const struct attribute_group * grp)2392*4882a593Smuzhiyun int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
2393*4882a593Smuzhiyun {
2394*4882a593Smuzhiyun union device_attr_group_devres *devres;
2395*4882a593Smuzhiyun int error;
2396*4882a593Smuzhiyun
2397*4882a593Smuzhiyun devres = devres_alloc(devm_attr_group_remove,
2398*4882a593Smuzhiyun sizeof(*devres), GFP_KERNEL);
2399*4882a593Smuzhiyun if (!devres)
2400*4882a593Smuzhiyun return -ENOMEM;
2401*4882a593Smuzhiyun
2402*4882a593Smuzhiyun error = sysfs_create_group(&dev->kobj, grp);
2403*4882a593Smuzhiyun if (error) {
2404*4882a593Smuzhiyun devres_free(devres);
2405*4882a593Smuzhiyun return error;
2406*4882a593Smuzhiyun }
2407*4882a593Smuzhiyun
2408*4882a593Smuzhiyun devres->group = grp;
2409*4882a593Smuzhiyun devres_add(dev, devres);
2410*4882a593Smuzhiyun return 0;
2411*4882a593Smuzhiyun }
2412*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_device_add_group);
2413*4882a593Smuzhiyun
/**
 * devm_device_remove_group: remove a managed group from a device
 * @dev: device to remove the group from
 * @grp: group to remove
 *
 * This function removes a group of attributes from a device. The attributes
 * previously have to have been created for this group, otherwise it will fail.
 */
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp)
{
	/* WARN if no matching devres entry exists — i.e. @grp was never added. */
	WARN_ON(devres_release(dev, devm_attr_group_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)grp));
}
EXPORT_SYMBOL_GPL(devm_device_remove_group);
2430*4882a593Smuzhiyun
2431*4882a593Smuzhiyun /**
2432*4882a593Smuzhiyun * devm_device_add_groups - create a bunch of managed attribute groups
2433*4882a593Smuzhiyun * @dev: The device to create the group for
2434*4882a593Smuzhiyun * @groups: The attribute groups to create, NULL terminated
2435*4882a593Smuzhiyun *
2436*4882a593Smuzhiyun * This function creates a bunch of managed attribute groups. If an error
2437*4882a593Smuzhiyun * occurs when creating a group, all previously created groups will be
2438*4882a593Smuzhiyun * removed, unwinding everything back to the original state when this
2439*4882a593Smuzhiyun * function was called. It will explicitly warn and error if any of the
2440*4882a593Smuzhiyun * attribute files being created already exist.
2441*4882a593Smuzhiyun *
2442*4882a593Smuzhiyun * Returns 0 on success or error code from sysfs_create_group on failure.
2443*4882a593Smuzhiyun */
devm_device_add_groups(struct device * dev,const struct attribute_group ** groups)2444*4882a593Smuzhiyun int devm_device_add_groups(struct device *dev,
2445*4882a593Smuzhiyun const struct attribute_group **groups)
2446*4882a593Smuzhiyun {
2447*4882a593Smuzhiyun union device_attr_group_devres *devres;
2448*4882a593Smuzhiyun int error;
2449*4882a593Smuzhiyun
2450*4882a593Smuzhiyun devres = devres_alloc(devm_attr_groups_remove,
2451*4882a593Smuzhiyun sizeof(*devres), GFP_KERNEL);
2452*4882a593Smuzhiyun if (!devres)
2453*4882a593Smuzhiyun return -ENOMEM;
2454*4882a593Smuzhiyun
2455*4882a593Smuzhiyun error = sysfs_create_groups(&dev->kobj, groups);
2456*4882a593Smuzhiyun if (error) {
2457*4882a593Smuzhiyun devres_free(devres);
2458*4882a593Smuzhiyun return error;
2459*4882a593Smuzhiyun }
2460*4882a593Smuzhiyun
2461*4882a593Smuzhiyun devres->groups = groups;
2462*4882a593Smuzhiyun devres_add(dev, devres);
2463*4882a593Smuzhiyun return 0;
2464*4882a593Smuzhiyun }
2465*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_device_add_groups);
2466*4882a593Smuzhiyun
/**
 * devm_device_remove_groups - remove a list of managed groups
 *
 * @dev: The device for the groups to be removed from
 * @groups: NULL terminated list of groups to be removed
 *
 * If groups is not NULL, remove the specified groups from the device.
 */
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups)
{
	/* WARN if no matching devres entry exists — i.e. @groups was never added. */
	WARN_ON(devres_release(dev, devm_attr_groups_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)groups));
}
EXPORT_SYMBOL_GPL(devm_device_remove_groups);
2483*4882a593Smuzhiyun
/*
 * Create all sysfs attributes for a newly added device: the class's
 * dev_groups, the device-type groups, the device's own groups, plus the
 * "online" and "waiting_for_supplier" files where applicable.  On any
 * failure, everything created so far is unwound in reverse order.
 */
static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	/* "online" only for devices that can be taken offline at runtime */
	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	/* only shown when fw_devlink enforcement may stall probing */
	if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) {
		error = device_create_file(dev, &dev_attr_waiting_for_supplier);
		if (error)
			goto err_remove_dev_online;
	}

	return 0;

	/* unwind in exact reverse order of creation */
 err_remove_dev_online:
	device_remove_file(dev, &dev_attr_online);
 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
 err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}
2533*4882a593Smuzhiyun
/* Tear down everything device_add_attrs() created, in reverse order. */
static void device_remove_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;

	device_remove_file(dev, &dev_attr_waiting_for_supplier);
	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if (class)
		device_remove_groups(dev, class->dev_groups);
}
2549*4882a593Smuzhiyun
/* Read-only "dev" attribute: prints the device's dev_t number. */
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);
2556*4882a593Smuzhiyun
2557*4882a593Smuzhiyun /* /sys/devices/ */
2558*4882a593Smuzhiyun struct kset *devices_kset;
2559*4882a593Smuzhiyun
2560*4882a593Smuzhiyun /**
2561*4882a593Smuzhiyun * devices_kset_move_before - Move device in the devices_kset's list.
2562*4882a593Smuzhiyun * @deva: Device to move.
2563*4882a593Smuzhiyun * @devb: Device @deva should come before.
2564*4882a593Smuzhiyun */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	/* devices_kset is created during driver-core init; nothing to do
	 * before that. */
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	/* both devices are assumed to already be on the kset list */
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}
2575*4882a593Smuzhiyun
2576*4882a593Smuzhiyun /**
2577*4882a593Smuzhiyun * devices_kset_move_after - Move device in the devices_kset's list.
2578*4882a593Smuzhiyun * @deva: Device to move
2579*4882a593Smuzhiyun * @devb: Device @deva should come after.
2580*4882a593Smuzhiyun */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	/* devices_kset is created during driver-core init; nothing to do
	 * before that. */
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	/* both devices are assumed to already be on the kset list */
	spin_lock(&devices_kset->list_lock);
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}
2591*4882a593Smuzhiyun
2592*4882a593Smuzhiyun /**
2593*4882a593Smuzhiyun * devices_kset_move_last - move the device to the end of devices_kset's list.
2594*4882a593Smuzhiyun * @dev: device to move
2595*4882a593Smuzhiyun */
void devices_kset_move_last(struct device *dev)
{
	/* devices_kset is created during driver-core init; nothing to do
	 * before that. */
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}
2605*4882a593Smuzhiyun
2606*4882a593Smuzhiyun /**
2607*4882a593Smuzhiyun * device_create_file - create sysfs attribute file for device.
2608*4882a593Smuzhiyun * @dev: device.
2609*4882a593Smuzhiyun * @attr: device attribute descriptor.
2610*4882a593Smuzhiyun */
device_create_file(struct device * dev,const struct device_attribute * attr)2611*4882a593Smuzhiyun int device_create_file(struct device *dev,
2612*4882a593Smuzhiyun const struct device_attribute *attr)
2613*4882a593Smuzhiyun {
2614*4882a593Smuzhiyun int error = 0;
2615*4882a593Smuzhiyun
2616*4882a593Smuzhiyun if (dev) {
2617*4882a593Smuzhiyun WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
2618*4882a593Smuzhiyun "Attribute %s: write permission without 'store'\n",
2619*4882a593Smuzhiyun attr->attr.name);
2620*4882a593Smuzhiyun WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
2621*4882a593Smuzhiyun "Attribute %s: read permission without 'show'\n",
2622*4882a593Smuzhiyun attr->attr.name);
2623*4882a593Smuzhiyun error = sysfs_create_file(&dev->kobj, &attr->attr);
2624*4882a593Smuzhiyun }
2625*4882a593Smuzhiyun
2626*4882a593Smuzhiyun return error;
2627*4882a593Smuzhiyun }
2628*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_create_file);
2629*4882a593Smuzhiyun
2630*4882a593Smuzhiyun /**
2631*4882a593Smuzhiyun * device_remove_file - remove sysfs attribute file.
2632*4882a593Smuzhiyun * @dev: device.
2633*4882a593Smuzhiyun * @attr: device attribute descriptor.
2634*4882a593Smuzhiyun */
device_remove_file(struct device * dev,const struct device_attribute * attr)2635*4882a593Smuzhiyun void device_remove_file(struct device *dev,
2636*4882a593Smuzhiyun const struct device_attribute *attr)
2637*4882a593Smuzhiyun {
2638*4882a593Smuzhiyun if (dev)
2639*4882a593Smuzhiyun sysfs_remove_file(&dev->kobj, &attr->attr);
2640*4882a593Smuzhiyun }
2641*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_remove_file);
2642*4882a593Smuzhiyun
2643*4882a593Smuzhiyun /**
2644*4882a593Smuzhiyun * device_remove_file_self - remove sysfs attribute file from its own method.
2645*4882a593Smuzhiyun * @dev: device.
2646*4882a593Smuzhiyun * @attr: device attribute descriptor.
2647*4882a593Smuzhiyun *
2648*4882a593Smuzhiyun * See kernfs_remove_self() for details.
2649*4882a593Smuzhiyun */
device_remove_file_self(struct device * dev,const struct device_attribute * attr)2650*4882a593Smuzhiyun bool device_remove_file_self(struct device *dev,
2651*4882a593Smuzhiyun const struct device_attribute *attr)
2652*4882a593Smuzhiyun {
2653*4882a593Smuzhiyun if (dev)
2654*4882a593Smuzhiyun return sysfs_remove_file_self(&dev->kobj, &attr->attr);
2655*4882a593Smuzhiyun else
2656*4882a593Smuzhiyun return false;
2657*4882a593Smuzhiyun }
2658*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_remove_file_self);
2659*4882a593Smuzhiyun
2660*4882a593Smuzhiyun /**
2661*4882a593Smuzhiyun * device_create_bin_file - create sysfs binary attribute file for device.
2662*4882a593Smuzhiyun * @dev: device.
2663*4882a593Smuzhiyun * @attr: device binary attribute descriptor.
2664*4882a593Smuzhiyun */
device_create_bin_file(struct device * dev,const struct bin_attribute * attr)2665*4882a593Smuzhiyun int device_create_bin_file(struct device *dev,
2666*4882a593Smuzhiyun const struct bin_attribute *attr)
2667*4882a593Smuzhiyun {
2668*4882a593Smuzhiyun int error = -EINVAL;
2669*4882a593Smuzhiyun if (dev)
2670*4882a593Smuzhiyun error = sysfs_create_bin_file(&dev->kobj, attr);
2671*4882a593Smuzhiyun return error;
2672*4882a593Smuzhiyun }
2673*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_create_bin_file);
2674*4882a593Smuzhiyun
2675*4882a593Smuzhiyun /**
2676*4882a593Smuzhiyun * device_remove_bin_file - remove sysfs binary attribute file
2677*4882a593Smuzhiyun * @dev: device.
2678*4882a593Smuzhiyun * @attr: device binary attribute descriptor.
2679*4882a593Smuzhiyun */
device_remove_bin_file(struct device * dev,const struct bin_attribute * attr)2680*4882a593Smuzhiyun void device_remove_bin_file(struct device *dev,
2681*4882a593Smuzhiyun const struct bin_attribute *attr)
2682*4882a593Smuzhiyun {
2683*4882a593Smuzhiyun if (dev)
2684*4882a593Smuzhiyun sysfs_remove_bin_file(&dev->kobj, attr);
2685*4882a593Smuzhiyun }
2686*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_remove_bin_file);
2687*4882a593Smuzhiyun
klist_children_get(struct klist_node * n)2688*4882a593Smuzhiyun static void klist_children_get(struct klist_node *n)
2689*4882a593Smuzhiyun {
2690*4882a593Smuzhiyun struct device_private *p = to_device_private_parent(n);
2691*4882a593Smuzhiyun struct device *dev = p->device;
2692*4882a593Smuzhiyun
2693*4882a593Smuzhiyun get_device(dev);
2694*4882a593Smuzhiyun }
2695*4882a593Smuzhiyun
klist_children_put(struct klist_node * n)2696*4882a593Smuzhiyun static void klist_children_put(struct klist_node *n)
2697*4882a593Smuzhiyun {
2698*4882a593Smuzhiyun struct device_private *p = to_device_private_parent(n);
2699*4882a593Smuzhiyun struct device *dev = p->device;
2700*4882a593Smuzhiyun
2701*4882a593Smuzhiyun put_device(dev);
2702*4882a593Smuzhiyun }
2703*4882a593Smuzhiyun
2704*4882a593Smuzhiyun /**
2705*4882a593Smuzhiyun * device_initialize - init device structure.
2706*4882a593Smuzhiyun * @dev: device.
2707*4882a593Smuzhiyun *
2708*4882a593Smuzhiyun * This prepares the device for use by other layers by initializing
2709*4882a593Smuzhiyun * its fields.
2710*4882a593Smuzhiyun * It is the first half of device_register(), if called by
2711*4882a593Smuzhiyun * that function, though it can also be called separately, so one
2712*4882a593Smuzhiyun * may use @dev's fields. In particular, get_device()/put_device()
2713*4882a593Smuzhiyun * may be used for reference counting of @dev after calling this
2714*4882a593Smuzhiyun * function.
2715*4882a593Smuzhiyun *
2716*4882a593Smuzhiyun * All fields in @dev must be initialized by the caller to 0, except
2717*4882a593Smuzhiyun * for those explicitly set to some other value. The simplest
2718*4882a593Smuzhiyun * approach is to use kzalloc() to allocate the structure containing
2719*4882a593Smuzhiyun * @dev.
2720*4882a593Smuzhiyun *
2721*4882a593Smuzhiyun * NOTE: Use put_device() to give up your reference instead of freeing
2722*4882a593Smuzhiyun * @dev directly once you have called this function.
2723*4882a593Smuzhiyun */
void device_initialize(struct device *dev)
{
	/* place the device under /sys/devices and hook up its ktype */
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
#ifdef CONFIG_PROVE_LOCKING
	mutex_init(&dev->lockdep_mutex);
#endif
	/* device mutexes are taken in hierarchy order; lockdep cannot
	 * model that, so validation is disabled for dev->mutex */
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	/* -1: no NUMA node assigned yet */
	set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
	INIT_LIST_HEAD(&dev->msi_list);
#endif
	/* device-link bookkeeping: no driver bound yet */
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	INIT_LIST_HEAD(&dev->links.defer_sync);
	dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
2747*4882a593Smuzhiyun
/*
 * Return the kobject for /sys/devices/virtual, creating it on first use.
 * Parent-less class devices are placed under this directory.
 *
 * NOTE(review): the lazy initialization below is not locked here -
 * presumably serialized by the callers; confirm before relying on
 * concurrent first-time calls.
 */
struct kobject *virtual_device_parent(struct device *dev)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}
2758*4882a593Smuzhiyun
/*
 * A "glue" directory kobject, inserted between a parent device and its
 * class devices to prevent sysfs namespace collisions.
 */
struct class_dir {
	struct kobject kobj;
	struct class *class;	/* class this glue directory belongs to */
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
2765*4882a593Smuzhiyun
/* kobject release: glue directories are heap-allocated, free on last put */
static void class_dir_release(struct kobject *kobj)
{
	kfree(to_class_dir(kobj));
}
2771*4882a593Smuzhiyun
2772*4882a593Smuzhiyun static const
class_dir_child_ns_type(struct kobject * kobj)2773*4882a593Smuzhiyun struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
2774*4882a593Smuzhiyun {
2775*4882a593Smuzhiyun struct class_dir *dir = to_class_dir(kobj);
2776*4882a593Smuzhiyun return dir->class->ns_type;
2777*4882a593Smuzhiyun }
2778*4882a593Smuzhiyun
/* kobj_type for class glue directories (see class_dir_create_and_add()) */
static struct kobj_type class_dir_ktype = {
	.release = class_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.child_ns_type = class_dir_child_ns_type
};
2784*4882a593Smuzhiyun
/*
 * Allocate and register a glue directory named after @class under
 * @parent_kobj.  Returns the new kobject or an ERR_PTR on failure.
 */
static struct kobject *
class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
{
	struct class_dir *dir;
	int retval;

	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return ERR_PTR(-ENOMEM);

	dir->class = class;
	kobject_init(&dir->kobj, &class_dir_ktype);

	dir->kobj.kset = &class->p->glue_dirs;

	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
	if (retval < 0) {
		/* after kobject_init(), release via put (-> class_dir_release),
		 * never kfree() directly */
		kobject_put(&dir->kobj);
		return ERR_PTR(retval);
	}
	return &dir->kobj;
}
2807*4882a593Smuzhiyun
/* serializes lookup, creation and removal of class glue directories */
static DEFINE_MUTEX(gdp_mutex);
2809*4882a593Smuzhiyun
/*
 * Pick the sysfs parent kobject for @dev.  Class devices may be placed
 * under "virtual", directly under a class-device parent, or inside a
 * per-class "glue" directory; non-class devices go under their parent or
 * the bus's dev_root.  May return NULL (top-level placement).
 */
static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* block disks show up in /sys/block */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * If we have no parent, we live in "virtual".
		 * Class-devices with a non class-device as parent, live
		 * in a "glue" directory to prevent namespace collisions.
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		/* gdp_mutex serializes lookup vs. creation of glue dirs */
		mutex_lock(&gdp_mutex);

		/* find our class-directory at the parent and reference it */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* or create a new class-directory at the parent device */
		k = class_dir_create_and_add(dev->class, parent_kobj);
		/* do not emit an uevent for this simple "glue" directory */
		mutex_unlock(&gdp_mutex);
		return k;
	}

	/* subsystems can specify a default root directory for their devices */
	if (!parent && dev->bus && dev->bus->dev_root)
		return &dev->bus->dev_root->kobj;

	if (parent)
		return &parent->kobj;
	return NULL;
}
2869*4882a593Smuzhiyun
live_in_glue_dir(struct kobject * kobj,struct device * dev)2870*4882a593Smuzhiyun static inline bool live_in_glue_dir(struct kobject *kobj,
2871*4882a593Smuzhiyun struct device *dev)
2872*4882a593Smuzhiyun {
2873*4882a593Smuzhiyun if (!kobj || !dev->class ||
2874*4882a593Smuzhiyun kobj->kset != &dev->class->p->glue_dirs)
2875*4882a593Smuzhiyun return false;
2876*4882a593Smuzhiyun return true;
2877*4882a593Smuzhiyun }
2878*4882a593Smuzhiyun
/* the glue dir, if the device has one, is its sysfs parent kobject */
static inline struct kobject *get_glue_dir(struct device *dev)
{
	return dev->kobj.parent;
}
2883*4882a593Smuzhiyun
2884*4882a593Smuzhiyun /*
2885*4882a593Smuzhiyun * make sure cleaning up dir as the last step, we need to make
2886*4882a593Smuzhiyun * sure .release handler of kobject is run with holding the
2887*4882a593Smuzhiyun * global lock
2888*4882a593Smuzhiyun */
/*
 * Drop @dev's reference on its glue directory and delete the directory
 * when this was the last user.  Everything runs under gdp_mutex so the
 * kobject release happens while the lock is held (see comment above).
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	unsigned int ref;

	/* see if we live in a "glue" directory */
	if (!live_in_glue_dir(glue_dir, dev))
		return;

	mutex_lock(&gdp_mutex);
	/**
	 * There is a race condition between removing glue directory
	 * and adding a new device under the glue directory.
	 *
	 * CPU1:                                         CPU2:
	 *
	 * device_add()
	 *   get_device_parent()
	 *     class_dir_create_and_add()
	 *       kobject_add_internal()
	 *         create_dir()    // create glue_dir
	 *
	 *                                               device_add()
	 *                                                 get_device_parent()
	 *                                                   kobject_get() // get glue_dir
	 *
	 * device_del()
	 *   cleanup_glue_dir()
	 *     kobject_del(glue_dir)
	 *
	 *                                               kobject_add()
	 *                                                 kobject_add_internal()
	 *                                                   create_dir() // in glue_dir
	 *                                                     sysfs_create_dir_ns()
	 *                                                       kernfs_create_dir_ns(sd)
	 *
	 *       sysfs_remove_dir() // glue_dir->sd=NULL
	 *       sysfs_put()        // free glue_dir->sd
	 *
	 *                                                         // sd is freed
	 *                                                         kernfs_new_node(sd)
	 *                                                           kernfs_get(glue_dir)
	 *                                                           kernfs_add_one()
	 *                                                           kernfs_put()
	 *
	 * Before CPU1 remove last child device under glue dir, if CPU2 add
	 * a new device under glue dir, the glue_dir kobject reference count
	 * will be increase to 2 in kobject_get(k). And CPU2 has been called
	 * kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir()
	 * and sysfs_put(). This result in glue_dir->sd is freed.
	 *
	 * Then the CPU2 will see a stale "empty" but still potentially used
	 * glue dir around in kernfs_new_node().
	 *
	 * In order to avoid this happening, we also should make sure that
	 * kernfs_node for glue_dir is released in CPU1 only when refcount
	 * for glue_dir kobj is 1.
	 */
	/* delete the directory only if it is childless and ours is the
	 * last reference (the !--ref test accounts for our own ref) */
	ref = kref_read(&glue_dir->kref);
	if (!kobject_has_children(glue_dir) && !--ref)
		kobject_del(glue_dir);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}
2952*4882a593Smuzhiyun
/*
 * Create the standard symlinks for a newly added device: "of_node"
 * (best effort), "subsystem", "device" (to the parent), and the link
 * from the class directory back to the device.  Undone in reverse order
 * on failure.
 */
static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n",error);
		/* An error here doesn't warrant bringing down the device */
	}

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* /sys/block has directories and does not need symlinks */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

 out_device:
	sysfs_remove_link(&dev->kobj, "device");

 out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
 out_devnode:
	/* removes "of_node" even if it was never created - presumably
	 * harmless when the link is absent; confirm before changing */
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}
3004*4882a593Smuzhiyun
/* Undo the symlinks created by device_add_class_symlinks(). */
static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	/* block devices got no class-directory link at add time */
	if (sysfs_deprecated && dev->class == &block_class)
		return;
#endif
	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}
3022*4882a593Smuzhiyun
/**
 * dev_set_name - set a device name
 * @dev: device
 * @fmt: format string for the device's name
 *
 * Return: 0 on success, or the error code returned by
 * kobject_set_name_vargs() on failure.
 */
int dev_set_name(struct device *dev, const char *fmt, ...)
{
	va_list vargs;
	int err;

	va_start(vargs, fmt);
	err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
	va_end(vargs);
	return err;
}
EXPORT_SYMBOL_GPL(dev_set_name);
3039*4882a593Smuzhiyun
3040*4882a593Smuzhiyun /**
3041*4882a593Smuzhiyun * device_to_dev_kobj - select a /sys/dev/ directory for the device
3042*4882a593Smuzhiyun * @dev: device
3043*4882a593Smuzhiyun *
3044*4882a593Smuzhiyun * By default we select char/ for new entries. Setting class->dev_obj
3045*4882a593Smuzhiyun * to NULL prevents an entry from being created. class->dev_kobj must
3046*4882a593Smuzhiyun * be set (or cleared) before any devices are registered to the class
3047*4882a593Smuzhiyun * otherwise device_create_sys_dev_entry() and
3048*4882a593Smuzhiyun * device_remove_sys_dev_entry() will disagree about the presence of
3049*4882a593Smuzhiyun * the link.
3050*4882a593Smuzhiyun */
device_to_dev_kobj(struct device * dev)3051*4882a593Smuzhiyun static struct kobject *device_to_dev_kobj(struct device *dev)
3052*4882a593Smuzhiyun {
3053*4882a593Smuzhiyun struct kobject *kobj;
3054*4882a593Smuzhiyun
3055*4882a593Smuzhiyun if (dev->class)
3056*4882a593Smuzhiyun kobj = dev->class->dev_kobj;
3057*4882a593Smuzhiyun else
3058*4882a593Smuzhiyun kobj = sysfs_dev_char_kobj;
3059*4882a593Smuzhiyun
3060*4882a593Smuzhiyun return kobj;
3061*4882a593Smuzhiyun }
3062*4882a593Smuzhiyun
device_create_sys_dev_entry(struct device * dev)3063*4882a593Smuzhiyun static int device_create_sys_dev_entry(struct device *dev)
3064*4882a593Smuzhiyun {
3065*4882a593Smuzhiyun struct kobject *kobj = device_to_dev_kobj(dev);
3066*4882a593Smuzhiyun int error = 0;
3067*4882a593Smuzhiyun char devt_str[15];
3068*4882a593Smuzhiyun
3069*4882a593Smuzhiyun if (kobj) {
3070*4882a593Smuzhiyun format_dev_t(devt_str, dev->devt);
3071*4882a593Smuzhiyun error = sysfs_create_link(kobj, &dev->kobj, devt_str);
3072*4882a593Smuzhiyun }
3073*4882a593Smuzhiyun
3074*4882a593Smuzhiyun return error;
3075*4882a593Smuzhiyun }
3076*4882a593Smuzhiyun
device_remove_sys_dev_entry(struct device * dev)3077*4882a593Smuzhiyun static void device_remove_sys_dev_entry(struct device *dev)
3078*4882a593Smuzhiyun {
3079*4882a593Smuzhiyun struct kobject *kobj = device_to_dev_kobj(dev);
3080*4882a593Smuzhiyun char devt_str[15];
3081*4882a593Smuzhiyun
3082*4882a593Smuzhiyun if (kobj) {
3083*4882a593Smuzhiyun format_dev_t(devt_str, dev->devt);
3084*4882a593Smuzhiyun sysfs_remove_link(kobj, devt_str);
3085*4882a593Smuzhiyun }
3086*4882a593Smuzhiyun }
3087*4882a593Smuzhiyun
/* Allocate and initialize the driver-core private part of @dev. */
static int device_private_init(struct device *dev)
{
	dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
	if (!dev->p)
		return -ENOMEM;
	dev->p->device = dev;
	/* children on the klist pin the parent device's refcount */
	klist_init(&dev->p->klist_children, klist_children_get,
		   klist_children_put);
	INIT_LIST_HEAD(&dev->p->deferred_probe);
	return 0;
}
3099*4882a593Smuzhiyun
/**
 * device_add - add device to device hierarchy.
 * @dev: device.
 *
 * This is part 2 of device_register(), though may be called
 * separately _iff_ device_initialize() has been called separately.
 *
 * This adds @dev to the kobject hierarchy via kobject_add(), adds it
 * to the global and sibling lists for the device, then
 * adds it to the other relevant subsystems of the driver model.
 *
 * Do not call this routine or device_register() more than once for
 * any device structure. The driver model core is not designed to work
 * with devices that get unregistered and then spring back to life.
 * (Among other things, it's very hard to guarantee that all references
 * to the previous incarnation of @dev have been dropped.) Allocate
 * and register a fresh new struct device instead.
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up your
 * reference instead.
 *
 * Rule of thumb is: if device_add() succeeds, you should call
 * device_del() when you want to get rid of it. If device_add() has
 * *not* succeeded, use *only* put_device() to drop the reference
 * count.
 */
int device_add(struct device *dev)
{
	struct device *parent;
	struct kobject *kobj;
	struct class_interface *class_intf;
	int error = -EINVAL;
	struct kobject *glue_dir = NULL;

	/* Hold a reference across the whole call; dropped at "done". */
	dev = get_device(dev);
	if (!dev)
		goto done;

	/* Lazily allocate the private data if device_initialize() didn't. */
	if (!dev->p) {
		error = device_private_init(dev);
		if (error)
			goto done;
	}

	/*
	 * for statically allocated devices, which should all be converted
	 * some day, we need to initialize the name. We prevent reading back
	 * the name, and force the use of dev_name()
	 */
	if (dev->init_name) {
		dev_set_name(dev, "%s", dev->init_name);
		dev->init_name = NULL;
	}

	/* subsystems can specify simple device enumeration */
	if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
		dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);

	/* A name is mandatory from here on; sysfs needs it. */
	if (!dev_name(dev)) {
		error = -EINVAL;
		goto name_error;
	}

	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

	/*
	 * Pin the parent and resolve the sysfs parent kobject, which may
	 * be a class "glue" directory rather than the parent device itself.
	 */
	parent = get_device(dev->parent);
	kobj = get_device_parent(dev, parent);
	if (IS_ERR(kobj)) {
		error = PTR_ERR(kobj);
		goto parent_error;
	}
	if (kobj)
		dev->kobj.parent = kobj;

	/* use parent numa_node */
	if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
		set_dev_node(dev, dev_to_node(parent));

	/* first, register with generic layer. */
	/* we require the name to be set before, and pass NULL */
	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
	if (error) {
		glue_dir = get_glue_dir(dev);
		goto Error;
	}

	/* notify platform of device entry */
	error = device_platform_notify(dev, KOBJ_ADD);
	if (error)
		goto platform_error;

	error = device_create_file(dev, &dev_attr_uevent);
	if (error)
		goto attrError;

	error = device_add_class_symlinks(dev);
	if (error)
		goto SymlinkError;
	error = device_add_attrs(dev);
	if (error)
		goto AttrsError;
	error = bus_add_device(dev);
	if (error)
		goto BusError;
	error = dpm_sysfs_add(dev);
	if (error)
		goto DPMError;
	device_pm_add(dev);

	/* Char/block devices additionally get "dev", /sys/dev/ and devtmpfs
	 * entries keyed on their dev_t.
	 */
	if (MAJOR(dev->devt)) {
		error = device_create_file(dev, &dev_attr_dev);
		if (error)
			goto DevAttrError;

		error = device_create_sys_dev_entry(dev);
		if (error)
			goto SysEntryError;

		devtmpfs_create_node(dev);
	}

	/* Notify clients of device addition. This call must come
	 * after dpm_sysfs_add() and before kobject_uevent().
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_ADD_DEVICE, dev);

	kobject_uevent(&dev->kobj, KOBJ_ADD);

	/*
	 * Check if any of the other devices (consumers) have been waiting for
	 * this device (supplier) to be added so that they can create a device
	 * link to it.
	 *
	 * This needs to happen after device_pm_add() because device_link_add()
	 * requires the supplier be registered before it's called.
	 *
	 * But this also needs to happen before bus_probe_device() to make sure
	 * waiting consumers can link to it before the driver is bound to the
	 * device and the driver sync_state callback is called for this device.
	 */
	if (dev->fwnode && !dev->fwnode->dev) {
		dev->fwnode->dev = dev;
		fw_devlink_link_device(dev);
	}

	bus_probe_device(dev);
	if (parent)
		klist_add_tail(&dev->p->knode_parent,
			       &parent->p->klist_children);

	if (dev->class) {
		mutex_lock(&dev->class->p->mutex);
		/* tie the class to the device */
		klist_add_tail(&dev->p->knode_class,
			       &dev->class->p->klist_devices);

		/* notify any interfaces that the device is here */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->add_dev)
				class_intf->add_dev(dev, class_intf);
		mutex_unlock(&dev->class->p->mutex);
	}
done:
	put_device(dev);
	return error;
	/*
	 * Error unwinding below mirrors the registration steps above in
	 * reverse order; each label undoes everything after its own step.
	 */
SysEntryError:
	if (MAJOR(dev->devt))
		device_remove_file(dev, &dev_attr_dev);
DevAttrError:
	device_pm_remove(dev);
	dpm_sysfs_remove(dev);
DPMError:
	bus_remove_device(dev);
BusError:
	device_remove_attrs(dev);
AttrsError:
	device_remove_class_symlinks(dev);
SymlinkError:
	device_remove_file(dev, &dev_attr_uevent);
attrError:
	device_platform_notify(dev, KOBJ_REMOVE);
platform_error:
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
Error:
	cleanup_glue_dir(dev, glue_dir);
parent_error:
	put_device(parent);
name_error:
	/* Drop the private data so a later retry starts from scratch. */
	kfree(dev->p);
	dev->p = NULL;
	goto done;
}
EXPORT_SYMBOL_GPL(device_add);
3299*4882a593Smuzhiyun
/**
 * device_register - register a device with the system.
 * @dev: pointer to the device structure
 *
 * This happens in two clean steps - initialize the device
 * and add it to the system. The two steps can be called
 * separately, but this is the easiest and most common.
 * I.e. you should only call the two helpers separately if
 * you have a clearly defined need to use and refcount the device
 * before it is added to the hierarchy.
 *
 * For more information, see the kerneldoc for device_initialize()
 * and device_add().
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up the
 * reference initialized in this function instead.
 */
int device_register(struct device *dev)
{
	device_initialize(dev);
	/* device_add() handles its own error unwinding; propagate its rc. */
	return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);
3324*4882a593Smuzhiyun
3325*4882a593Smuzhiyun /**
3326*4882a593Smuzhiyun * get_device - increment reference count for device.
3327*4882a593Smuzhiyun * @dev: device.
3328*4882a593Smuzhiyun *
3329*4882a593Smuzhiyun * This simply forwards the call to kobject_get(), though
3330*4882a593Smuzhiyun * we do take care to provide for the case that we get a NULL
3331*4882a593Smuzhiyun * pointer passed in.
3332*4882a593Smuzhiyun */
get_device(struct device * dev)3333*4882a593Smuzhiyun struct device *get_device(struct device *dev)
3334*4882a593Smuzhiyun {
3335*4882a593Smuzhiyun return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
3336*4882a593Smuzhiyun }
3337*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(get_device);
3338*4882a593Smuzhiyun
3339*4882a593Smuzhiyun /**
3340*4882a593Smuzhiyun * put_device - decrement reference count.
3341*4882a593Smuzhiyun * @dev: device in question.
3342*4882a593Smuzhiyun */
put_device(struct device * dev)3343*4882a593Smuzhiyun void put_device(struct device *dev)
3344*4882a593Smuzhiyun {
3345*4882a593Smuzhiyun /* might_sleep(); */
3346*4882a593Smuzhiyun if (dev)
3347*4882a593Smuzhiyun kobject_put(&dev->kobj);
3348*4882a593Smuzhiyun }
3349*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(put_device);
3350*4882a593Smuzhiyun
kill_device(struct device * dev)3351*4882a593Smuzhiyun bool kill_device(struct device *dev)
3352*4882a593Smuzhiyun {
3353*4882a593Smuzhiyun /*
3354*4882a593Smuzhiyun * Require the device lock and set the "dead" flag to guarantee that
3355*4882a593Smuzhiyun * the update behavior is consistent with the other bitfields near
3356*4882a593Smuzhiyun * it and that we cannot have an asynchronous probe routine trying
3357*4882a593Smuzhiyun * to run while we are tearing out the bus/class/sysfs from
3358*4882a593Smuzhiyun * underneath the device.
3359*4882a593Smuzhiyun */
3360*4882a593Smuzhiyun lockdep_assert_held(&dev->mutex);
3361*4882a593Smuzhiyun
3362*4882a593Smuzhiyun if (dev->p->dead)
3363*4882a593Smuzhiyun return false;
3364*4882a593Smuzhiyun dev->p->dead = true;
3365*4882a593Smuzhiyun return true;
3366*4882a593Smuzhiyun }
3367*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(kill_device);
3368*4882a593Smuzhiyun
/**
 * device_del - delete device from system.
 * @dev: device.
 *
 * This is the first part of the device unregistration
 * sequence. This removes the device from the lists we control
 * from here, has it removed from the other driver model
 * subsystems it was added to in device_add(), and removes it
 * from the kobject hierarchy.
 *
 * NOTE: this should be called manually _iff_ device_add() was
 * also called manually.
 */
void device_del(struct device *dev)
{
	struct device *parent = dev->parent;
	struct kobject *glue_dir = NULL;
	struct class_interface *class_intf;
	unsigned int noio_flag;

	/*
	 * Mark the device dead under its lock so concurrent (async) probes
	 * observe the flag before we start dismantling bus/class/sysfs state.
	 */
	device_lock(dev);
	kill_device(dev);
	device_unlock(dev);

	/* Undo the fwnode back-pointer set up in device_add(). */
	if (dev->fwnode && dev->fwnode->dev == dev)
		dev->fwnode->dev = NULL;

	/* Notify clients of device removal. This call must come
	 * before dpm_sysfs_remove().
	 */
	noio_flag = memalloc_noio_save();
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DEL_DEVICE, dev);

	/* Teardown below mirrors device_add()'s registration order in
	 * reverse.
	 */
	dpm_sysfs_remove(dev);
	if (parent)
		klist_del(&dev->p->knode_parent);
	if (MAJOR(dev->devt)) {
		devtmpfs_delete_node(dev);
		device_remove_sys_dev_entry(dev);
		device_remove_file(dev, &dev_attr_dev);
	}
	if (dev->class) {
		device_remove_class_symlinks(dev);

		mutex_lock(&dev->class->p->mutex);
		/* notify any interfaces that the device is now gone */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->remove_dev)
				class_intf->remove_dev(dev, class_intf);
		/* remove the device from the class list */
		klist_del(&dev->p->knode_class);
		mutex_unlock(&dev->class->p->mutex);
	}
	device_remove_file(dev, &dev_attr_uevent);
	device_remove_attrs(dev);
	bus_remove_device(dev);
	device_pm_remove(dev);
	driver_deferred_probe_del(dev);
	device_platform_notify(dev, KOBJ_REMOVE);
	device_remove_properties(dev);
	device_links_purge(dev);

	/* Second notification: the device is now fully gone from the bus. */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_REMOVED_DEVICE, dev);
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	/* Grab the glue dir before kobject_del() detaches the kobject. */
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
	cleanup_glue_dir(dev, glue_dir);
	/*
	 * The noio scope spans the notifiers and sysfs teardown —
	 * presumably because device_del() can run in an I/O completion
	 * path; TODO confirm against the commit introducing it.
	 */
	memalloc_noio_restore(noio_flag);
	/* Drop the parent reference taken in device_add(). */
	put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);
3445*4882a593Smuzhiyun
/**
 * device_unregister - unregister device from system.
 * @dev: device going away.
 *
 * We do this in two parts, like we do device_register(). First,
 * we remove it from all the subsystems with device_del(), then
 * we decrement the reference count via put_device(). If that
 * is the final reference count, the device will be cleaned up
 * via device_release() above. Otherwise, the structure will
 * stick around until the final reference to the device is dropped.
 */
void device_unregister(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	device_del(dev);
	/* Drop the reference taken by device_register()/device_initialize(). */
	put_device(dev);
}
EXPORT_SYMBOL_GPL(device_unregister);
3464*4882a593Smuzhiyun
prev_device(struct klist_iter * i)3465*4882a593Smuzhiyun static struct device *prev_device(struct klist_iter *i)
3466*4882a593Smuzhiyun {
3467*4882a593Smuzhiyun struct klist_node *n = klist_prev(i);
3468*4882a593Smuzhiyun struct device *dev = NULL;
3469*4882a593Smuzhiyun struct device_private *p;
3470*4882a593Smuzhiyun
3471*4882a593Smuzhiyun if (n) {
3472*4882a593Smuzhiyun p = to_device_private_parent(n);
3473*4882a593Smuzhiyun dev = p->device;
3474*4882a593Smuzhiyun }
3475*4882a593Smuzhiyun return dev;
3476*4882a593Smuzhiyun }
3477*4882a593Smuzhiyun
next_device(struct klist_iter * i)3478*4882a593Smuzhiyun static struct device *next_device(struct klist_iter *i)
3479*4882a593Smuzhiyun {
3480*4882a593Smuzhiyun struct klist_node *n = klist_next(i);
3481*4882a593Smuzhiyun struct device *dev = NULL;
3482*4882a593Smuzhiyun struct device_private *p;
3483*4882a593Smuzhiyun
3484*4882a593Smuzhiyun if (n) {
3485*4882a593Smuzhiyun p = to_device_private_parent(n);
3486*4882a593Smuzhiyun dev = p->device;
3487*4882a593Smuzhiyun }
3488*4882a593Smuzhiyun return dev;
3489*4882a593Smuzhiyun }
3490*4882a593Smuzhiyun
3491*4882a593Smuzhiyun /**
3492*4882a593Smuzhiyun * device_get_devnode - path of device node file
3493*4882a593Smuzhiyun * @dev: device
3494*4882a593Smuzhiyun * @mode: returned file access mode
3495*4882a593Smuzhiyun * @uid: returned file owner
3496*4882a593Smuzhiyun * @gid: returned file group
3497*4882a593Smuzhiyun * @tmp: possibly allocated string
3498*4882a593Smuzhiyun *
3499*4882a593Smuzhiyun * Return the relative path of a possible device node.
3500*4882a593Smuzhiyun * Non-default names may need to allocate a memory to compose
3501*4882a593Smuzhiyun * a name. This memory is returned in tmp and needs to be
3502*4882a593Smuzhiyun * freed by the caller.
3503*4882a593Smuzhiyun */
device_get_devnode(struct device * dev,umode_t * mode,kuid_t * uid,kgid_t * gid,const char ** tmp)3504*4882a593Smuzhiyun const char *device_get_devnode(struct device *dev,
3505*4882a593Smuzhiyun umode_t *mode, kuid_t *uid, kgid_t *gid,
3506*4882a593Smuzhiyun const char **tmp)
3507*4882a593Smuzhiyun {
3508*4882a593Smuzhiyun char *s;
3509*4882a593Smuzhiyun
3510*4882a593Smuzhiyun *tmp = NULL;
3511*4882a593Smuzhiyun
3512*4882a593Smuzhiyun /* the device type may provide a specific name */
3513*4882a593Smuzhiyun if (dev->type && dev->type->devnode)
3514*4882a593Smuzhiyun *tmp = dev->type->devnode(dev, mode, uid, gid);
3515*4882a593Smuzhiyun if (*tmp)
3516*4882a593Smuzhiyun return *tmp;
3517*4882a593Smuzhiyun
3518*4882a593Smuzhiyun /* the class may provide a specific name */
3519*4882a593Smuzhiyun if (dev->class && dev->class->devnode)
3520*4882a593Smuzhiyun *tmp = dev->class->devnode(dev, mode);
3521*4882a593Smuzhiyun if (*tmp)
3522*4882a593Smuzhiyun return *tmp;
3523*4882a593Smuzhiyun
3524*4882a593Smuzhiyun /* return name without allocation, tmp == NULL */
3525*4882a593Smuzhiyun if (strchr(dev_name(dev), '!') == NULL)
3526*4882a593Smuzhiyun return dev_name(dev);
3527*4882a593Smuzhiyun
3528*4882a593Smuzhiyun /* replace '!' in the name with '/' */
3529*4882a593Smuzhiyun s = kstrdup(dev_name(dev), GFP_KERNEL);
3530*4882a593Smuzhiyun if (!s)
3531*4882a593Smuzhiyun return NULL;
3532*4882a593Smuzhiyun strreplace(s, '!', '/');
3533*4882a593Smuzhiyun return *tmp = s;
3534*4882a593Smuzhiyun }
3535*4882a593Smuzhiyun
3536*4882a593Smuzhiyun /**
3537*4882a593Smuzhiyun * device_for_each_child - device child iterator.
3538*4882a593Smuzhiyun * @parent: parent struct device.
3539*4882a593Smuzhiyun * @fn: function to be called for each device.
3540*4882a593Smuzhiyun * @data: data for the callback.
3541*4882a593Smuzhiyun *
3542*4882a593Smuzhiyun * Iterate over @parent's child devices, and call @fn for each,
3543*4882a593Smuzhiyun * passing it @data.
3544*4882a593Smuzhiyun *
3545*4882a593Smuzhiyun * We check the return of @fn each time. If it returns anything
3546*4882a593Smuzhiyun * other than 0, we break out and return that value.
3547*4882a593Smuzhiyun */
device_for_each_child(struct device * parent,void * data,int (* fn)(struct device * dev,void * data))3548*4882a593Smuzhiyun int device_for_each_child(struct device *parent, void *data,
3549*4882a593Smuzhiyun int (*fn)(struct device *dev, void *data))
3550*4882a593Smuzhiyun {
3551*4882a593Smuzhiyun struct klist_iter i;
3552*4882a593Smuzhiyun struct device *child;
3553*4882a593Smuzhiyun int error = 0;
3554*4882a593Smuzhiyun
3555*4882a593Smuzhiyun if (!parent->p)
3556*4882a593Smuzhiyun return 0;
3557*4882a593Smuzhiyun
3558*4882a593Smuzhiyun klist_iter_init(&parent->p->klist_children, &i);
3559*4882a593Smuzhiyun while (!error && (child = next_device(&i)))
3560*4882a593Smuzhiyun error = fn(child, data);
3561*4882a593Smuzhiyun klist_iter_exit(&i);
3562*4882a593Smuzhiyun return error;
3563*4882a593Smuzhiyun }
3564*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_for_each_child);
3565*4882a593Smuzhiyun
3566*4882a593Smuzhiyun /**
3567*4882a593Smuzhiyun * device_for_each_child_reverse - device child iterator in reversed order.
3568*4882a593Smuzhiyun * @parent: parent struct device.
3569*4882a593Smuzhiyun * @fn: function to be called for each device.
3570*4882a593Smuzhiyun * @data: data for the callback.
3571*4882a593Smuzhiyun *
3572*4882a593Smuzhiyun * Iterate over @parent's child devices, and call @fn for each,
3573*4882a593Smuzhiyun * passing it @data.
3574*4882a593Smuzhiyun *
3575*4882a593Smuzhiyun * We check the return of @fn each time. If it returns anything
3576*4882a593Smuzhiyun * other than 0, we break out and return that value.
3577*4882a593Smuzhiyun */
device_for_each_child_reverse(struct device * parent,void * data,int (* fn)(struct device * dev,void * data))3578*4882a593Smuzhiyun int device_for_each_child_reverse(struct device *parent, void *data,
3579*4882a593Smuzhiyun int (*fn)(struct device *dev, void *data))
3580*4882a593Smuzhiyun {
3581*4882a593Smuzhiyun struct klist_iter i;
3582*4882a593Smuzhiyun struct device *child;
3583*4882a593Smuzhiyun int error = 0;
3584*4882a593Smuzhiyun
3585*4882a593Smuzhiyun if (!parent->p)
3586*4882a593Smuzhiyun return 0;
3587*4882a593Smuzhiyun
3588*4882a593Smuzhiyun klist_iter_init(&parent->p->klist_children, &i);
3589*4882a593Smuzhiyun while ((child = prev_device(&i)) && !error)
3590*4882a593Smuzhiyun error = fn(child, data);
3591*4882a593Smuzhiyun klist_iter_exit(&i);
3592*4882a593Smuzhiyun return error;
3593*4882a593Smuzhiyun }
3594*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
3595*4882a593Smuzhiyun
3596*4882a593Smuzhiyun /**
3597*4882a593Smuzhiyun * device_find_child - device iterator for locating a particular device.
3598*4882a593Smuzhiyun * @parent: parent struct device
3599*4882a593Smuzhiyun * @match: Callback function to check device
3600*4882a593Smuzhiyun * @data: Data to pass to match function
3601*4882a593Smuzhiyun *
3602*4882a593Smuzhiyun * This is similar to the device_for_each_child() function above, but it
3603*4882a593Smuzhiyun * returns a reference to a device that is 'found' for later use, as
3604*4882a593Smuzhiyun * determined by the @match callback.
3605*4882a593Smuzhiyun *
3606*4882a593Smuzhiyun * The callback should return 0 if the device doesn't match and non-zero
3607*4882a593Smuzhiyun * if it does. If the callback returns non-zero and a reference to the
3608*4882a593Smuzhiyun * current device can be obtained, this function will return to the caller
3609*4882a593Smuzhiyun * and not iterate over any more devices.
3610*4882a593Smuzhiyun *
3611*4882a593Smuzhiyun * NOTE: you will need to drop the reference with put_device() after use.
3612*4882a593Smuzhiyun */
device_find_child(struct device * parent,void * data,int (* match)(struct device * dev,void * data))3613*4882a593Smuzhiyun struct device *device_find_child(struct device *parent, void *data,
3614*4882a593Smuzhiyun int (*match)(struct device *dev, void *data))
3615*4882a593Smuzhiyun {
3616*4882a593Smuzhiyun struct klist_iter i;
3617*4882a593Smuzhiyun struct device *child;
3618*4882a593Smuzhiyun
3619*4882a593Smuzhiyun if (!parent)
3620*4882a593Smuzhiyun return NULL;
3621*4882a593Smuzhiyun
3622*4882a593Smuzhiyun klist_iter_init(&parent->p->klist_children, &i);
3623*4882a593Smuzhiyun while ((child = next_device(&i)))
3624*4882a593Smuzhiyun if (match(child, data) && get_device(child))
3625*4882a593Smuzhiyun break;
3626*4882a593Smuzhiyun klist_iter_exit(&i);
3627*4882a593Smuzhiyun return child;
3628*4882a593Smuzhiyun }
3629*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_find_child);
3630*4882a593Smuzhiyun
3631*4882a593Smuzhiyun /**
3632*4882a593Smuzhiyun * device_find_child_by_name - device iterator for locating a child device.
3633*4882a593Smuzhiyun * @parent: parent struct device
3634*4882a593Smuzhiyun * @name: name of the child device
3635*4882a593Smuzhiyun *
3636*4882a593Smuzhiyun * This is similar to the device_find_child() function above, but it
3637*4882a593Smuzhiyun * returns a reference to a device that has the name @name.
3638*4882a593Smuzhiyun *
3639*4882a593Smuzhiyun * NOTE: you will need to drop the reference with put_device() after use.
3640*4882a593Smuzhiyun */
device_find_child_by_name(struct device * parent,const char * name)3641*4882a593Smuzhiyun struct device *device_find_child_by_name(struct device *parent,
3642*4882a593Smuzhiyun const char *name)
3643*4882a593Smuzhiyun {
3644*4882a593Smuzhiyun struct klist_iter i;
3645*4882a593Smuzhiyun struct device *child;
3646*4882a593Smuzhiyun
3647*4882a593Smuzhiyun if (!parent)
3648*4882a593Smuzhiyun return NULL;
3649*4882a593Smuzhiyun
3650*4882a593Smuzhiyun klist_iter_init(&parent->p->klist_children, &i);
3651*4882a593Smuzhiyun while ((child = next_device(&i)))
3652*4882a593Smuzhiyun if (sysfs_streq(dev_name(child), name) && get_device(child))
3653*4882a593Smuzhiyun break;
3654*4882a593Smuzhiyun klist_iter_exit(&i);
3655*4882a593Smuzhiyun return child;
3656*4882a593Smuzhiyun }
3657*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_find_child_by_name);
3658*4882a593Smuzhiyun
devices_init(void)3659*4882a593Smuzhiyun int __init devices_init(void)
3660*4882a593Smuzhiyun {
3661*4882a593Smuzhiyun devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
3662*4882a593Smuzhiyun if (!devices_kset)
3663*4882a593Smuzhiyun return -ENOMEM;
3664*4882a593Smuzhiyun dev_kobj = kobject_create_and_add("dev", NULL);
3665*4882a593Smuzhiyun if (!dev_kobj)
3666*4882a593Smuzhiyun goto dev_kobj_err;
3667*4882a593Smuzhiyun sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
3668*4882a593Smuzhiyun if (!sysfs_dev_block_kobj)
3669*4882a593Smuzhiyun goto block_kobj_err;
3670*4882a593Smuzhiyun sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
3671*4882a593Smuzhiyun if (!sysfs_dev_char_kobj)
3672*4882a593Smuzhiyun goto char_kobj_err;
3673*4882a593Smuzhiyun
3674*4882a593Smuzhiyun return 0;
3675*4882a593Smuzhiyun
3676*4882a593Smuzhiyun char_kobj_err:
3677*4882a593Smuzhiyun kobject_put(sysfs_dev_block_kobj);
3678*4882a593Smuzhiyun block_kobj_err:
3679*4882a593Smuzhiyun kobject_put(dev_kobj);
3680*4882a593Smuzhiyun dev_kobj_err:
3681*4882a593Smuzhiyun kset_unregister(devices_kset);
3682*4882a593Smuzhiyun return -ENOMEM;
3683*4882a593Smuzhiyun }
3684*4882a593Smuzhiyun
device_check_offline(struct device * dev,void * not_used)3685*4882a593Smuzhiyun static int device_check_offline(struct device *dev, void *not_used)
3686*4882a593Smuzhiyun {
3687*4882a593Smuzhiyun int ret;
3688*4882a593Smuzhiyun
3689*4882a593Smuzhiyun ret = device_for_each_child(dev, NULL, device_check_offline);
3690*4882a593Smuzhiyun if (ret)
3691*4882a593Smuzhiyun return ret;
3692*4882a593Smuzhiyun
3693*4882a593Smuzhiyun return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
3694*4882a593Smuzhiyun }
3695*4882a593Smuzhiyun
3696*4882a593Smuzhiyun /**
3697*4882a593Smuzhiyun * device_offline - Prepare the device for hot-removal.
3698*4882a593Smuzhiyun * @dev: Device to be put offline.
3699*4882a593Smuzhiyun *
3700*4882a593Smuzhiyun * Execute the device bus type's .offline() callback, if present, to prepare
3701*4882a593Smuzhiyun * the device for a subsequent hot-removal. If that succeeds, the device must
3702*4882a593Smuzhiyun * not be used until either it is removed or its bus type's .online() callback
3703*4882a593Smuzhiyun * is executed.
3704*4882a593Smuzhiyun *
3705*4882a593Smuzhiyun * Call under device_hotplug_lock.
3706*4882a593Smuzhiyun */
device_offline(struct device * dev)3707*4882a593Smuzhiyun int device_offline(struct device *dev)
3708*4882a593Smuzhiyun {
3709*4882a593Smuzhiyun int ret;
3710*4882a593Smuzhiyun
3711*4882a593Smuzhiyun if (dev->offline_disabled)
3712*4882a593Smuzhiyun return -EPERM;
3713*4882a593Smuzhiyun
3714*4882a593Smuzhiyun ret = device_for_each_child(dev, NULL, device_check_offline);
3715*4882a593Smuzhiyun if (ret)
3716*4882a593Smuzhiyun return ret;
3717*4882a593Smuzhiyun
3718*4882a593Smuzhiyun device_lock(dev);
3719*4882a593Smuzhiyun if (device_supports_offline(dev)) {
3720*4882a593Smuzhiyun if (dev->offline) {
3721*4882a593Smuzhiyun ret = 1;
3722*4882a593Smuzhiyun } else {
3723*4882a593Smuzhiyun ret = dev->bus->offline(dev);
3724*4882a593Smuzhiyun if (!ret) {
3725*4882a593Smuzhiyun kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
3726*4882a593Smuzhiyun dev->offline = true;
3727*4882a593Smuzhiyun }
3728*4882a593Smuzhiyun }
3729*4882a593Smuzhiyun }
3730*4882a593Smuzhiyun device_unlock(dev);
3731*4882a593Smuzhiyun
3732*4882a593Smuzhiyun return ret;
3733*4882a593Smuzhiyun }
3734*4882a593Smuzhiyun
3735*4882a593Smuzhiyun /**
3736*4882a593Smuzhiyun * device_online - Put the device back online after successful device_offline().
3737*4882a593Smuzhiyun * @dev: Device to be put back online.
3738*4882a593Smuzhiyun *
3739*4882a593Smuzhiyun * If device_offline() has been successfully executed for @dev, but the device
3740*4882a593Smuzhiyun * has not been removed subsequently, execute its bus type's .online() callback
3741*4882a593Smuzhiyun * to indicate that the device can be used again.
3742*4882a593Smuzhiyun *
3743*4882a593Smuzhiyun * Call under device_hotplug_lock.
3744*4882a593Smuzhiyun */
device_online(struct device * dev)3745*4882a593Smuzhiyun int device_online(struct device *dev)
3746*4882a593Smuzhiyun {
3747*4882a593Smuzhiyun int ret = 0;
3748*4882a593Smuzhiyun
3749*4882a593Smuzhiyun device_lock(dev);
3750*4882a593Smuzhiyun if (device_supports_offline(dev)) {
3751*4882a593Smuzhiyun if (dev->offline) {
3752*4882a593Smuzhiyun ret = dev->bus->online(dev);
3753*4882a593Smuzhiyun if (!ret) {
3754*4882a593Smuzhiyun kobject_uevent(&dev->kobj, KOBJ_ONLINE);
3755*4882a593Smuzhiyun dev->offline = false;
3756*4882a593Smuzhiyun }
3757*4882a593Smuzhiyun } else {
3758*4882a593Smuzhiyun ret = 1;
3759*4882a593Smuzhiyun }
3760*4882a593Smuzhiyun }
3761*4882a593Smuzhiyun device_unlock(dev);
3762*4882a593Smuzhiyun
3763*4882a593Smuzhiyun return ret;
3764*4882a593Smuzhiyun }
3765*4882a593Smuzhiyun
/*
 * struct root_device - dummy device used to group others under /sys/devices
 * @dev:   embedded device registered in the hierarchy
 * @owner: module that created this root device; its sysfs directory is
 *         the target of the 'module' symlink
 */
struct root_device {
	struct device dev;
	struct module *owner;
};
3770*4882a593Smuzhiyun
/* Map an embedded struct device back to its containing root_device. */
static inline struct root_device *to_root_device(struct device *d)
{
	return container_of(d, struct root_device, dev);
}
3775*4882a593Smuzhiyun
/*
 * Release callback for root devices: frees the containing root_device
 * once the last reference to its embedded struct device is dropped.
 */
static void root_device_release(struct device *dev)
{
	kfree(to_root_device(dev));
}
3780*4882a593Smuzhiyun
3781*4882a593Smuzhiyun /**
3782*4882a593Smuzhiyun * __root_device_register - allocate and register a root device
3783*4882a593Smuzhiyun * @name: root device name
3784*4882a593Smuzhiyun * @owner: owner module of the root device, usually THIS_MODULE
3785*4882a593Smuzhiyun *
3786*4882a593Smuzhiyun * This function allocates a root device and registers it
3787*4882a593Smuzhiyun * using device_register(). In order to free the returned
3788*4882a593Smuzhiyun * device, use root_device_unregister().
3789*4882a593Smuzhiyun *
3790*4882a593Smuzhiyun * Root devices are dummy devices which allow other devices
3791*4882a593Smuzhiyun * to be grouped under /sys/devices. Use this function to
3792*4882a593Smuzhiyun * allocate a root device and then use it as the parent of
3793*4882a593Smuzhiyun * any device which should appear under /sys/devices/{name}
3794*4882a593Smuzhiyun *
3795*4882a593Smuzhiyun * The /sys/devices/{name} directory will also contain a
3796*4882a593Smuzhiyun * 'module' symlink which points to the @owner directory
3797*4882a593Smuzhiyun * in sysfs.
3798*4882a593Smuzhiyun *
3799*4882a593Smuzhiyun * Returns &struct device pointer on success, or ERR_PTR() on error.
3800*4882a593Smuzhiyun *
3801*4882a593Smuzhiyun * Note: You probably want to use root_device_register().
3802*4882a593Smuzhiyun */
__root_device_register(const char * name,struct module * owner)3803*4882a593Smuzhiyun struct device *__root_device_register(const char *name, struct module *owner)
3804*4882a593Smuzhiyun {
3805*4882a593Smuzhiyun struct root_device *root;
3806*4882a593Smuzhiyun int err = -ENOMEM;
3807*4882a593Smuzhiyun
3808*4882a593Smuzhiyun root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
3809*4882a593Smuzhiyun if (!root)
3810*4882a593Smuzhiyun return ERR_PTR(err);
3811*4882a593Smuzhiyun
3812*4882a593Smuzhiyun err = dev_set_name(&root->dev, "%s", name);
3813*4882a593Smuzhiyun if (err) {
3814*4882a593Smuzhiyun kfree(root);
3815*4882a593Smuzhiyun return ERR_PTR(err);
3816*4882a593Smuzhiyun }
3817*4882a593Smuzhiyun
3818*4882a593Smuzhiyun root->dev.release = root_device_release;
3819*4882a593Smuzhiyun
3820*4882a593Smuzhiyun err = device_register(&root->dev);
3821*4882a593Smuzhiyun if (err) {
3822*4882a593Smuzhiyun put_device(&root->dev);
3823*4882a593Smuzhiyun return ERR_PTR(err);
3824*4882a593Smuzhiyun }
3825*4882a593Smuzhiyun
3826*4882a593Smuzhiyun #ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */
3827*4882a593Smuzhiyun if (owner) {
3828*4882a593Smuzhiyun struct module_kobject *mk = &owner->mkobj;
3829*4882a593Smuzhiyun
3830*4882a593Smuzhiyun err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
3831*4882a593Smuzhiyun if (err) {
3832*4882a593Smuzhiyun device_unregister(&root->dev);
3833*4882a593Smuzhiyun return ERR_PTR(err);
3834*4882a593Smuzhiyun }
3835*4882a593Smuzhiyun root->owner = owner;
3836*4882a593Smuzhiyun }
3837*4882a593Smuzhiyun #endif
3838*4882a593Smuzhiyun
3839*4882a593Smuzhiyun return &root->dev;
3840*4882a593Smuzhiyun }
3841*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(__root_device_register);
3842*4882a593Smuzhiyun
3843*4882a593Smuzhiyun /**
3844*4882a593Smuzhiyun * root_device_unregister - unregister and free a root device
3845*4882a593Smuzhiyun * @dev: device going away
3846*4882a593Smuzhiyun *
3847*4882a593Smuzhiyun * This function unregisters and cleans up a device that was created by
3848*4882a593Smuzhiyun * root_device_register().
3849*4882a593Smuzhiyun */
root_device_unregister(struct device * dev)3850*4882a593Smuzhiyun void root_device_unregister(struct device *dev)
3851*4882a593Smuzhiyun {
3852*4882a593Smuzhiyun struct root_device *root = to_root_device(dev);
3853*4882a593Smuzhiyun
3854*4882a593Smuzhiyun if (root->owner)
3855*4882a593Smuzhiyun sysfs_remove_link(&root->dev.kobj, "module");
3856*4882a593Smuzhiyun
3857*4882a593Smuzhiyun device_unregister(dev);
3858*4882a593Smuzhiyun }
3859*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(root_device_unregister);
3860*4882a593Smuzhiyun
3861*4882a593Smuzhiyun
/*
 * Release callback for devices allocated by device_create() and
 * friends: simply frees the struct device allocated in
 * device_create_groups_vargs().
 */
static void device_create_release(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	kfree(dev);
}
3867*4882a593Smuzhiyun
3868*4882a593Smuzhiyun static __printf(6, 0) struct device *
device_create_groups_vargs(struct class * class,struct device * parent,dev_t devt,void * drvdata,const struct attribute_group ** groups,const char * fmt,va_list args)3869*4882a593Smuzhiyun device_create_groups_vargs(struct class *class, struct device *parent,
3870*4882a593Smuzhiyun dev_t devt, void *drvdata,
3871*4882a593Smuzhiyun const struct attribute_group **groups,
3872*4882a593Smuzhiyun const char *fmt, va_list args)
3873*4882a593Smuzhiyun {
3874*4882a593Smuzhiyun struct device *dev = NULL;
3875*4882a593Smuzhiyun int retval = -ENODEV;
3876*4882a593Smuzhiyun
3877*4882a593Smuzhiyun if (class == NULL || IS_ERR(class))
3878*4882a593Smuzhiyun goto error;
3879*4882a593Smuzhiyun
3880*4882a593Smuzhiyun dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3881*4882a593Smuzhiyun if (!dev) {
3882*4882a593Smuzhiyun retval = -ENOMEM;
3883*4882a593Smuzhiyun goto error;
3884*4882a593Smuzhiyun }
3885*4882a593Smuzhiyun
3886*4882a593Smuzhiyun device_initialize(dev);
3887*4882a593Smuzhiyun dev->devt = devt;
3888*4882a593Smuzhiyun dev->class = class;
3889*4882a593Smuzhiyun dev->parent = parent;
3890*4882a593Smuzhiyun dev->groups = groups;
3891*4882a593Smuzhiyun dev->release = device_create_release;
3892*4882a593Smuzhiyun dev_set_drvdata(dev, drvdata);
3893*4882a593Smuzhiyun
3894*4882a593Smuzhiyun retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
3895*4882a593Smuzhiyun if (retval)
3896*4882a593Smuzhiyun goto error;
3897*4882a593Smuzhiyun
3898*4882a593Smuzhiyun retval = device_add(dev);
3899*4882a593Smuzhiyun if (retval)
3900*4882a593Smuzhiyun goto error;
3901*4882a593Smuzhiyun
3902*4882a593Smuzhiyun return dev;
3903*4882a593Smuzhiyun
3904*4882a593Smuzhiyun error:
3905*4882a593Smuzhiyun put_device(dev);
3906*4882a593Smuzhiyun return ERR_PTR(retval);
3907*4882a593Smuzhiyun }
3908*4882a593Smuzhiyun
3909*4882a593Smuzhiyun /**
3910*4882a593Smuzhiyun * device_create - creates a device and registers it with sysfs
3911*4882a593Smuzhiyun * @class: pointer to the struct class that this device should be registered to
3912*4882a593Smuzhiyun * @parent: pointer to the parent struct device of this new device, if any
3913*4882a593Smuzhiyun * @devt: the dev_t for the char device to be added
3914*4882a593Smuzhiyun * @drvdata: the data to be added to the device for callbacks
3915*4882a593Smuzhiyun * @fmt: string for the device's name
3916*4882a593Smuzhiyun *
3917*4882a593Smuzhiyun * This function can be used by char device classes. A struct device
3918*4882a593Smuzhiyun * will be created in sysfs, registered to the specified class.
3919*4882a593Smuzhiyun *
3920*4882a593Smuzhiyun * A "dev" file will be created, showing the dev_t for the device, if
3921*4882a593Smuzhiyun * the dev_t is not 0,0.
3922*4882a593Smuzhiyun * If a pointer to a parent struct device is passed in, the newly created
3923*4882a593Smuzhiyun * struct device will be a child of that device in sysfs.
3924*4882a593Smuzhiyun * The pointer to the struct device will be returned from the call.
3925*4882a593Smuzhiyun * Any further sysfs files that might be required can be created using this
3926*4882a593Smuzhiyun * pointer.
3927*4882a593Smuzhiyun *
3928*4882a593Smuzhiyun * Returns &struct device pointer on success, or ERR_PTR() on error.
3929*4882a593Smuzhiyun *
3930*4882a593Smuzhiyun * Note: the struct class passed to this function must have previously
3931*4882a593Smuzhiyun * been created with a call to class_create().
3932*4882a593Smuzhiyun */
device_create(struct class * class,struct device * parent,dev_t devt,void * drvdata,const char * fmt,...)3933*4882a593Smuzhiyun struct device *device_create(struct class *class, struct device *parent,
3934*4882a593Smuzhiyun dev_t devt, void *drvdata, const char *fmt, ...)
3935*4882a593Smuzhiyun {
3936*4882a593Smuzhiyun va_list vargs;
3937*4882a593Smuzhiyun struct device *dev;
3938*4882a593Smuzhiyun
3939*4882a593Smuzhiyun va_start(vargs, fmt);
3940*4882a593Smuzhiyun dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
3941*4882a593Smuzhiyun fmt, vargs);
3942*4882a593Smuzhiyun va_end(vargs);
3943*4882a593Smuzhiyun return dev;
3944*4882a593Smuzhiyun }
3945*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_create);
3946*4882a593Smuzhiyun
3947*4882a593Smuzhiyun /**
3948*4882a593Smuzhiyun * device_create_with_groups - creates a device and registers it with sysfs
3949*4882a593Smuzhiyun * @class: pointer to the struct class that this device should be registered to
3950*4882a593Smuzhiyun * @parent: pointer to the parent struct device of this new device, if any
3951*4882a593Smuzhiyun * @devt: the dev_t for the char device to be added
3952*4882a593Smuzhiyun * @drvdata: the data to be added to the device for callbacks
3953*4882a593Smuzhiyun * @groups: NULL-terminated list of attribute groups to be created
3954*4882a593Smuzhiyun * @fmt: string for the device's name
3955*4882a593Smuzhiyun *
3956*4882a593Smuzhiyun * This function can be used by char device classes. A struct device
3957*4882a593Smuzhiyun * will be created in sysfs, registered to the specified class.
3958*4882a593Smuzhiyun * Additional attributes specified in the groups parameter will also
3959*4882a593Smuzhiyun * be created automatically.
3960*4882a593Smuzhiyun *
3961*4882a593Smuzhiyun * A "dev" file will be created, showing the dev_t for the device, if
3962*4882a593Smuzhiyun * the dev_t is not 0,0.
3963*4882a593Smuzhiyun * If a pointer to a parent struct device is passed in, the newly created
3964*4882a593Smuzhiyun * struct device will be a child of that device in sysfs.
3965*4882a593Smuzhiyun * The pointer to the struct device will be returned from the call.
3966*4882a593Smuzhiyun * Any further sysfs files that might be required can be created using this
3967*4882a593Smuzhiyun * pointer.
3968*4882a593Smuzhiyun *
3969*4882a593Smuzhiyun * Returns &struct device pointer on success, or ERR_PTR() on error.
3970*4882a593Smuzhiyun *
3971*4882a593Smuzhiyun * Note: the struct class passed to this function must have previously
3972*4882a593Smuzhiyun * been created with a call to class_create().
3973*4882a593Smuzhiyun */
device_create_with_groups(struct class * class,struct device * parent,dev_t devt,void * drvdata,const struct attribute_group ** groups,const char * fmt,...)3974*4882a593Smuzhiyun struct device *device_create_with_groups(struct class *class,
3975*4882a593Smuzhiyun struct device *parent, dev_t devt,
3976*4882a593Smuzhiyun void *drvdata,
3977*4882a593Smuzhiyun const struct attribute_group **groups,
3978*4882a593Smuzhiyun const char *fmt, ...)
3979*4882a593Smuzhiyun {
3980*4882a593Smuzhiyun va_list vargs;
3981*4882a593Smuzhiyun struct device *dev;
3982*4882a593Smuzhiyun
3983*4882a593Smuzhiyun va_start(vargs, fmt);
3984*4882a593Smuzhiyun dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
3985*4882a593Smuzhiyun fmt, vargs);
3986*4882a593Smuzhiyun va_end(vargs);
3987*4882a593Smuzhiyun return dev;
3988*4882a593Smuzhiyun }
3989*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_create_with_groups);
3990*4882a593Smuzhiyun
/**
 * device_destroy - removes a device that was created with device_create()
 * @class: pointer to the struct class that this device was registered with
 * @devt: the dev_t of the device that was previously registered
 *
 * This call unregisters and cleans up a device that was created with a
 * call to device_create().
 */
void device_destroy(struct class *class, dev_t devt)
{
	struct device *dev;

	dev = class_find_device_by_devt(class, devt);
	if (dev) {
		/*
		 * Drop the reference taken by class_find_device_by_devt()
		 * right away: device registration itself still holds a
		 * reference, so the device stays valid until
		 * device_unregister() has finished tearing it down.
		 */
		put_device(dev);
		device_unregister(dev);
	}
}
EXPORT_SYMBOL_GPL(device_destroy);
4010*4882a593Smuzhiyun
4011*4882a593Smuzhiyun /**
4012*4882a593Smuzhiyun * device_rename - renames a device
4013*4882a593Smuzhiyun * @dev: the pointer to the struct device to be renamed
4014*4882a593Smuzhiyun * @new_name: the new name of the device
4015*4882a593Smuzhiyun *
4016*4882a593Smuzhiyun * It is the responsibility of the caller to provide mutual
4017*4882a593Smuzhiyun * exclusion between two different calls of device_rename
4018*4882a593Smuzhiyun * on the same device to ensure that new_name is valid and
4019*4882a593Smuzhiyun * won't conflict with other devices.
4020*4882a593Smuzhiyun *
4021*4882a593Smuzhiyun * Note: Don't call this function. Currently, the networking layer calls this
4022*4882a593Smuzhiyun * function, but that will change. The following text from Kay Sievers offers
4023*4882a593Smuzhiyun * some insight:
4024*4882a593Smuzhiyun *
4025*4882a593Smuzhiyun * Renaming devices is racy at many levels, symlinks and other stuff are not
4026*4882a593Smuzhiyun * replaced atomically, and you get a "move" uevent, but it's not easy to
4027*4882a593Smuzhiyun * connect the event to the old and new device. Device nodes are not renamed at
4028*4882a593Smuzhiyun * all, there isn't even support for that in the kernel now.
4029*4882a593Smuzhiyun *
4030*4882a593Smuzhiyun * In the meantime, during renaming, your target name might be taken by another
4031*4882a593Smuzhiyun * driver, creating conflicts. Or the old name is taken directly after you
4032*4882a593Smuzhiyun * renamed it -- then you get events for the same DEVPATH, before you even see
4033*4882a593Smuzhiyun * the "move" event. It's just a mess, and nothing new should ever rely on
4034*4882a593Smuzhiyun * kernel device renaming. Besides that, it's not even implemented now for
4035*4882a593Smuzhiyun * other things than (driver-core wise very simple) network devices.
4036*4882a593Smuzhiyun *
4037*4882a593Smuzhiyun * We are currently about to change network renaming in udev to completely
4038*4882a593Smuzhiyun * disallow renaming of devices in the same namespace as the kernel uses,
4039*4882a593Smuzhiyun * because we can't solve the problems properly, that arise with swapping names
4040*4882a593Smuzhiyun * of multiple interfaces without races. Means, renaming of eth[0-9]* will only
4041*4882a593Smuzhiyun * be allowed to some other name than eth[0-9]*, for the aforementioned
4042*4882a593Smuzhiyun * reasons.
4043*4882a593Smuzhiyun *
4044*4882a593Smuzhiyun * Make up a "real" name in the driver before you register anything, or add
4045*4882a593Smuzhiyun * some other attributes for userspace to find the device, or use udev to add
4046*4882a593Smuzhiyun * symlinks -- but never rename kernel devices later, it's a complete mess. We
4047*4882a593Smuzhiyun * don't even want to get into that and try to implement the missing pieces in
4048*4882a593Smuzhiyun * the core. We really have other pieces to fix in the driver core mess. :)
4049*4882a593Smuzhiyun */
device_rename(struct device * dev,const char * new_name)4050*4882a593Smuzhiyun int device_rename(struct device *dev, const char *new_name)
4051*4882a593Smuzhiyun {
4052*4882a593Smuzhiyun struct kobject *kobj = &dev->kobj;
4053*4882a593Smuzhiyun char *old_device_name = NULL;
4054*4882a593Smuzhiyun int error;
4055*4882a593Smuzhiyun
4056*4882a593Smuzhiyun dev = get_device(dev);
4057*4882a593Smuzhiyun if (!dev)
4058*4882a593Smuzhiyun return -EINVAL;
4059*4882a593Smuzhiyun
4060*4882a593Smuzhiyun dev_dbg(dev, "renaming to %s\n", new_name);
4061*4882a593Smuzhiyun
4062*4882a593Smuzhiyun old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
4063*4882a593Smuzhiyun if (!old_device_name) {
4064*4882a593Smuzhiyun error = -ENOMEM;
4065*4882a593Smuzhiyun goto out;
4066*4882a593Smuzhiyun }
4067*4882a593Smuzhiyun
4068*4882a593Smuzhiyun if (dev->class) {
4069*4882a593Smuzhiyun error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
4070*4882a593Smuzhiyun kobj, old_device_name,
4071*4882a593Smuzhiyun new_name, kobject_namespace(kobj));
4072*4882a593Smuzhiyun if (error)
4073*4882a593Smuzhiyun goto out;
4074*4882a593Smuzhiyun }
4075*4882a593Smuzhiyun
4076*4882a593Smuzhiyun error = kobject_rename(kobj, new_name);
4077*4882a593Smuzhiyun if (error)
4078*4882a593Smuzhiyun goto out;
4079*4882a593Smuzhiyun
4080*4882a593Smuzhiyun out:
4081*4882a593Smuzhiyun put_device(dev);
4082*4882a593Smuzhiyun
4083*4882a593Smuzhiyun kfree(old_device_name);
4084*4882a593Smuzhiyun
4085*4882a593Smuzhiyun return error;
4086*4882a593Smuzhiyun }
4087*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_rename);
4088*4882a593Smuzhiyun
device_move_class_links(struct device * dev,struct device * old_parent,struct device * new_parent)4089*4882a593Smuzhiyun static int device_move_class_links(struct device *dev,
4090*4882a593Smuzhiyun struct device *old_parent,
4091*4882a593Smuzhiyun struct device *new_parent)
4092*4882a593Smuzhiyun {
4093*4882a593Smuzhiyun int error = 0;
4094*4882a593Smuzhiyun
4095*4882a593Smuzhiyun if (old_parent)
4096*4882a593Smuzhiyun sysfs_remove_link(&dev->kobj, "device");
4097*4882a593Smuzhiyun if (new_parent)
4098*4882a593Smuzhiyun error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
4099*4882a593Smuzhiyun "device");
4100*4882a593Smuzhiyun return error;
4101*4882a593Smuzhiyun }
4102*4882a593Smuzhiyun
/**
 * device_move - moves a device to a new parent
 * @dev: the pointer to the struct device to be moved
 * @new_parent: the new parent of the device (can be NULL)
 * @dpm_order: how to reorder the dpm_list
 *
 * Moves @dev (its kobject, its position in the parent's child klist and,
 * depending on @dpm_order, its position in the PM lists) under
 * @new_parent.  On a class-link failure the move is rolled back.
 *
 * Returns 0 on success or a negative error code.
 */
int device_move(struct device *dev, struct device *new_parent,
		enum dpm_order dpm_order)
{
	int error;
	struct device *old_parent;
	struct kobject *new_parent_kobj;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	device_pm_lock();
	new_parent = get_device(new_parent);
	/* May create (and return) a class "glue" directory for @dev. */
	new_parent_kobj = get_device_parent(dev, new_parent);
	if (IS_ERR(new_parent_kobj)) {
		error = PTR_ERR(new_parent_kobj);
		put_device(new_parent);
		goto out;
	}

	pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
		 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
	error = kobject_move(&dev->kobj, new_parent_kobj);
	if (error) {
		/* Drop the glue dir if we were its only prospective user. */
		cleanup_glue_dir(dev, new_parent_kobj);
		put_device(new_parent);
		goto out;
	}
	/* Re-link @dev into the new parent's list of children. */
	old_parent = dev->parent;
	dev->parent = new_parent;
	if (old_parent)
		klist_remove(&dev->p->knode_parent);
	if (new_parent) {
		klist_add_tail(&dev->p->knode_parent,
			       &new_parent->p->klist_children);
		set_dev_node(dev, dev_to_node(new_parent));
	}

	if (dev->class) {
		error = device_move_class_links(dev, old_parent, new_parent);
		if (error) {
			/* We ignore errors on cleanup since we're hosed anyway... */
			device_move_class_links(dev, new_parent, old_parent);
			/* Try to move the kobject back under the old parent. */
			if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
				if (new_parent)
					klist_remove(&dev->p->knode_parent);
				dev->parent = old_parent;
				if (old_parent) {
					klist_add_tail(&dev->p->knode_parent,
						       &old_parent->p->klist_children);
					set_dev_node(dev, dev_to_node(old_parent));
				}
			}
			cleanup_glue_dir(dev, new_parent_kobj);
			put_device(new_parent);
			goto out;
		}
	}
	/* Reposition @dev in the PM lists per the caller's request. */
	switch (dpm_order) {
	case DPM_ORDER_NONE:
		break;
	case DPM_ORDER_DEV_AFTER_PARENT:
		device_pm_move_after(dev, new_parent);
		devices_kset_move_after(dev, new_parent);
		break;
	case DPM_ORDER_PARENT_BEFORE_DEV:
		device_pm_move_before(new_parent, dev);
		devices_kset_move_before(new_parent, dev);
		break;
	case DPM_ORDER_DEV_LAST:
		device_pm_move_last(dev);
		devices_kset_move_last(dev);
		break;
	}

	put_device(old_parent);
out:
	device_pm_unlock();
	put_device(dev);
	return error;
}
EXPORT_SYMBOL_GPL(device_move);
4191*4882a593Smuzhiyun
/*
 * Change the ownership of all sysfs attribute groups of @dev — those
 * supplied by its class, its type and the device itself, plus the
 * "online" attribute for offline-capable devices — to @kuid/@kgid.
 * Returns 0 on success or the first error encountered.
 */
static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
				     kgid_t kgid)
{
	struct kobject *kobj = &dev->kobj;
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int rc;

	/* Groups supplied by the device's class, if it has one. */
	if (class) {
		rc = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
					       kgid);
		if (rc)
			return rc;
	}

	/* Groups supplied by the device's type, if it has one. */
	if (type) {
		rc = sysfs_groups_change_owner(kobj, type->groups, kuid,
					       kgid);
		if (rc)
			return rc;
	}

	/* The device's own attribute groups. */
	rc = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
	if (rc)
		return rc;

	/* The "online" attribute only exists for offline-capable devices. */
	if (device_supports_offline(dev) && !dev->offline_disabled) {
		rc = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
					     kuid, kgid);
		if (rc)
			return rc;
	}

	return 0;
}
4237*4882a593Smuzhiyun
4238*4882a593Smuzhiyun /**
4239*4882a593Smuzhiyun * device_change_owner - change the owner of an existing device.
4240*4882a593Smuzhiyun * @dev: device.
4241*4882a593Smuzhiyun * @kuid: new owner's kuid
4242*4882a593Smuzhiyun * @kgid: new owner's kgid
4243*4882a593Smuzhiyun *
4244*4882a593Smuzhiyun * This changes the owner of @dev and its corresponding sysfs entries to
4245*4882a593Smuzhiyun * @kuid/@kgid. This function closely mirrors how @dev was added via driver
4246*4882a593Smuzhiyun * core.
4247*4882a593Smuzhiyun *
4248*4882a593Smuzhiyun * Returns 0 on success or error code on failure.
4249*4882a593Smuzhiyun */
device_change_owner(struct device * dev,kuid_t kuid,kgid_t kgid)4250*4882a593Smuzhiyun int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
4251*4882a593Smuzhiyun {
4252*4882a593Smuzhiyun int error;
4253*4882a593Smuzhiyun struct kobject *kobj = &dev->kobj;
4254*4882a593Smuzhiyun
4255*4882a593Smuzhiyun dev = get_device(dev);
4256*4882a593Smuzhiyun if (!dev)
4257*4882a593Smuzhiyun return -EINVAL;
4258*4882a593Smuzhiyun
4259*4882a593Smuzhiyun /*
4260*4882a593Smuzhiyun * Change the kobject and the default attributes and groups of the
4261*4882a593Smuzhiyun * ktype associated with it to @kuid/@kgid.
4262*4882a593Smuzhiyun */
4263*4882a593Smuzhiyun error = sysfs_change_owner(kobj, kuid, kgid);
4264*4882a593Smuzhiyun if (error)
4265*4882a593Smuzhiyun goto out;
4266*4882a593Smuzhiyun
4267*4882a593Smuzhiyun /*
4268*4882a593Smuzhiyun * Change the uevent file for @dev to the new owner. The uevent file
4269*4882a593Smuzhiyun * was created in a separate step when @dev got added and we mirror
4270*4882a593Smuzhiyun * that step here.
4271*4882a593Smuzhiyun */
4272*4882a593Smuzhiyun error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
4273*4882a593Smuzhiyun kgid);
4274*4882a593Smuzhiyun if (error)
4275*4882a593Smuzhiyun goto out;
4276*4882a593Smuzhiyun
4277*4882a593Smuzhiyun /*
4278*4882a593Smuzhiyun * Change the device groups, the device groups associated with the
4279*4882a593Smuzhiyun * device class, and the groups associated with the device type of @dev
4280*4882a593Smuzhiyun * to @kuid/@kgid.
4281*4882a593Smuzhiyun */
4282*4882a593Smuzhiyun error = device_attrs_change_owner(dev, kuid, kgid);
4283*4882a593Smuzhiyun if (error)
4284*4882a593Smuzhiyun goto out;
4285*4882a593Smuzhiyun
4286*4882a593Smuzhiyun error = dpm_sysfs_change_owner(dev, kuid, kgid);
4287*4882a593Smuzhiyun if (error)
4288*4882a593Smuzhiyun goto out;
4289*4882a593Smuzhiyun
4290*4882a593Smuzhiyun #ifdef CONFIG_BLOCK
4291*4882a593Smuzhiyun if (sysfs_deprecated && dev->class == &block_class)
4292*4882a593Smuzhiyun goto out;
4293*4882a593Smuzhiyun #endif
4294*4882a593Smuzhiyun
4295*4882a593Smuzhiyun /*
4296*4882a593Smuzhiyun * Change the owner of the symlink located in the class directory of
4297*4882a593Smuzhiyun * the device class associated with @dev which points to the actual
4298*4882a593Smuzhiyun * directory entry for @dev to @kuid/@kgid. This ensures that the
4299*4882a593Smuzhiyun * symlink shows the same permissions as its target.
4300*4882a593Smuzhiyun */
4301*4882a593Smuzhiyun error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
4302*4882a593Smuzhiyun dev_name(dev), kuid, kgid);
4303*4882a593Smuzhiyun if (error)
4304*4882a593Smuzhiyun goto out;
4305*4882a593Smuzhiyun
4306*4882a593Smuzhiyun out:
4307*4882a593Smuzhiyun put_device(dev);
4308*4882a593Smuzhiyun return error;
4309*4882a593Smuzhiyun }
4310*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(device_change_owner);
4311*4882a593Smuzhiyun
/**
 * device_shutdown - call ->shutdown() on each device to shutdown.
 *
 * Called late in the system shutdown/reboot path.  Blocks new probes,
 * then walks the global devices list from the newest device backwards,
 * invoking the class shutdown_pre hook and then the bus (or, failing
 * that, driver) shutdown hook for each device.
 */
void device_shutdown(void)
{
	struct device *dev, *parent;

	/* Let in-flight probes finish, then refuse any new ones. */
	wait_for_device_probe();
	device_block_probing();

	cpufreq_suspend();

	spin_lock(&devices_kset->list_lock);
	/*
	 * Walk the devices list backward, shutting down each in turn.
	 * Beware that device unplug events may also start pulling
	 * devices offline, even as the system is shutting down.
	 */
	while (!list_empty(&devices_kset->list)) {
		dev = list_entry(devices_kset->list.prev, struct device,
				kobj.entry);

		/*
		 * hold reference count of device's parent to
		 * prevent it from being freed because parent's
		 * lock is to be held
		 */
		parent = get_device(dev->parent);
		get_device(dev);
		/*
		 * Make sure the device is off the kset list, in the
		 * event that dev->*->shutdown() doesn't remove it.
		 */
		list_del_init(&dev->kobj.entry);
		/* Drop the list lock while calling into driver code. */
		spin_unlock(&devices_kset->list_lock);

		/* hold lock to avoid race with probe/release */
		if (parent)
			device_lock(parent);
		device_lock(dev);

		/* Don't allow any more runtime suspends */
		pm_runtime_get_noresume(dev);
		pm_runtime_barrier(dev);

		if (dev->class && dev->class->shutdown_pre) {
			if (initcall_debug)
				dev_info(dev, "shutdown_pre\n");
			dev->class->shutdown_pre(dev);
		}
		/* Bus shutdown takes precedence over the driver's hook. */
		if (dev->bus && dev->bus->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->bus->shutdown(dev);
		} else if (dev->driver && dev->driver->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->driver->shutdown(dev);
		}

		device_unlock(dev);
		if (parent)
			device_unlock(parent);

		put_device(dev);
		/* put_device(NULL) is a no-op for parent-less devices. */
		put_device(parent);

		spin_lock(&devices_kset->list_lock);
	}
	spin_unlock(&devices_kset->list_lock);
}
4383*4882a593Smuzhiyun
4384*4882a593Smuzhiyun /*
4385*4882a593Smuzhiyun * Device logging functions
4386*4882a593Smuzhiyun */
4387*4882a593Smuzhiyun
4388*4882a593Smuzhiyun #ifdef CONFIG_PRINTK
4389*4882a593Smuzhiyun static void
set_dev_info(const struct device * dev,struct dev_printk_info * dev_info)4390*4882a593Smuzhiyun set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
4391*4882a593Smuzhiyun {
4392*4882a593Smuzhiyun const char *subsys;
4393*4882a593Smuzhiyun
4394*4882a593Smuzhiyun memset(dev_info, 0, sizeof(*dev_info));
4395*4882a593Smuzhiyun
4396*4882a593Smuzhiyun if (dev->class)
4397*4882a593Smuzhiyun subsys = dev->class->name;
4398*4882a593Smuzhiyun else if (dev->bus)
4399*4882a593Smuzhiyun subsys = dev->bus->name;
4400*4882a593Smuzhiyun else
4401*4882a593Smuzhiyun return;
4402*4882a593Smuzhiyun
4403*4882a593Smuzhiyun strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));
4404*4882a593Smuzhiyun
4405*4882a593Smuzhiyun /*
4406*4882a593Smuzhiyun * Add device identifier DEVICE=:
4407*4882a593Smuzhiyun * b12:8 block dev_t
4408*4882a593Smuzhiyun * c127:3 char dev_t
4409*4882a593Smuzhiyun * n8 netdev ifindex
4410*4882a593Smuzhiyun * +sound:card0 subsystem:devname
4411*4882a593Smuzhiyun */
4412*4882a593Smuzhiyun if (MAJOR(dev->devt)) {
4413*4882a593Smuzhiyun char c;
4414*4882a593Smuzhiyun
4415*4882a593Smuzhiyun if (strcmp(subsys, "block") == 0)
4416*4882a593Smuzhiyun c = 'b';
4417*4882a593Smuzhiyun else
4418*4882a593Smuzhiyun c = 'c';
4419*4882a593Smuzhiyun
4420*4882a593Smuzhiyun snprintf(dev_info->device, sizeof(dev_info->device),
4421*4882a593Smuzhiyun "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
4422*4882a593Smuzhiyun } else if (strcmp(subsys, "net") == 0) {
4423*4882a593Smuzhiyun struct net_device *net = to_net_dev(dev);
4424*4882a593Smuzhiyun
4425*4882a593Smuzhiyun snprintf(dev_info->device, sizeof(dev_info->device),
4426*4882a593Smuzhiyun "n%u", net->ifindex);
4427*4882a593Smuzhiyun } else {
4428*4882a593Smuzhiyun snprintf(dev_info->device, sizeof(dev_info->device),
4429*4882a593Smuzhiyun "+%s:%s", subsys, dev_name(dev));
4430*4882a593Smuzhiyun }
4431*4882a593Smuzhiyun }
4432*4882a593Smuzhiyun
/*
 * Emit a message for @dev at @level with structured-logging metadata
 * (SUBSYSTEM=/DEVICE=) attached. Returns whatever vprintk_emit() returns.
 */
int dev_vprintk_emit(int level, const struct device *dev,
		     const char *fmt, va_list args)
{
	struct dev_printk_info info;

	set_dev_info(dev, &info);

	return vprintk_emit(0, level, &info, fmt, args);
}
EXPORT_SYMBOL(dev_vprintk_emit);
4443*4882a593Smuzhiyun
dev_printk_emit(int level,const struct device * dev,const char * fmt,...)4444*4882a593Smuzhiyun int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
4445*4882a593Smuzhiyun {
4446*4882a593Smuzhiyun va_list args;
4447*4882a593Smuzhiyun int r;
4448*4882a593Smuzhiyun
4449*4882a593Smuzhiyun va_start(args, fmt);
4450*4882a593Smuzhiyun
4451*4882a593Smuzhiyun r = dev_vprintk_emit(level, dev, fmt, args);
4452*4882a593Smuzhiyun
4453*4882a593Smuzhiyun va_end(args);
4454*4882a593Smuzhiyun
4455*4882a593Smuzhiyun return r;
4456*4882a593Smuzhiyun }
4457*4882a593Smuzhiyun EXPORT_SYMBOL(dev_printk_emit);
4458*4882a593Smuzhiyun
/*
 * Common back end for dev_printk() and the per-level helpers: prefix the
 * message with "<driver> <device>:" when a device is known.
 */
static void __dev_printk(const char *level, const struct device *dev,
			 struct va_format *vaf)
{
	if (!dev) {
		printk("%s(NULL device *): %pV", level, vaf);
		return;
	}

	/* level is a KERN_* string, e.g. "\001" "3"; level[1] is the digit. */
	dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
			dev_driver_string(dev), dev_name(dev), vaf);
}
4468*4882a593Smuzhiyun
dev_printk(const char * level,const struct device * dev,const char * fmt,...)4469*4882a593Smuzhiyun void dev_printk(const char *level, const struct device *dev,
4470*4882a593Smuzhiyun const char *fmt, ...)
4471*4882a593Smuzhiyun {
4472*4882a593Smuzhiyun struct va_format vaf;
4473*4882a593Smuzhiyun va_list args;
4474*4882a593Smuzhiyun
4475*4882a593Smuzhiyun va_start(args, fmt);
4476*4882a593Smuzhiyun
4477*4882a593Smuzhiyun vaf.fmt = fmt;
4478*4882a593Smuzhiyun vaf.va = &args;
4479*4882a593Smuzhiyun
4480*4882a593Smuzhiyun __dev_printk(level, dev, &vaf);
4481*4882a593Smuzhiyun
4482*4882a593Smuzhiyun va_end(args);
4483*4882a593Smuzhiyun }
4484*4882a593Smuzhiyun EXPORT_SYMBOL(dev_printk);
4485*4882a593Smuzhiyun
/*
 * Generate the standard per-level device logging helpers
 * (_dev_emerg() ... _dev_info()). Each expands to a printf-style
 * wrapper that forwards to __dev_printk() with the matching KERN_*
 * prefix, mirroring dev_printk() above.
 */
#define define_dev_printk_level(func, kern_level)		\
void func(const struct device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__dev_printk(kern_level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_dev_printk_level(_dev_emerg, KERN_EMERG);
define_dev_printk_level(_dev_alert, KERN_ALERT);
define_dev_printk_level(_dev_crit, KERN_CRIT);
define_dev_printk_level(_dev_err, KERN_ERR);
define_dev_printk_level(_dev_warn, KERN_WARNING);
define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);
4510*4882a593Smuzhiyun
4511*4882a593Smuzhiyun #endif
4512*4882a593Smuzhiyun
4513*4882a593Smuzhiyun /**
4514*4882a593Smuzhiyun * dev_err_probe - probe error check and log helper
4515*4882a593Smuzhiyun * @dev: the pointer to the struct device
4516*4882a593Smuzhiyun * @err: error value to test
4517*4882a593Smuzhiyun * @fmt: printf-style format string
4518*4882a593Smuzhiyun * @...: arguments as specified in the format string
4519*4882a593Smuzhiyun *
 * This helper implements a common pattern present in probe functions for
 * error checking: print a debug or error message depending on whether the
 * error value is -EPROBE_DEFER, and propagate the error upwards.
4523*4882a593Smuzhiyun * In case of -EPROBE_DEFER it sets also defer probe reason, which can be
4524*4882a593Smuzhiyun * checked later by reading devices_deferred debugfs attribute.
4525*4882a593Smuzhiyun * It replaces code sequence::
4526*4882a593Smuzhiyun *
4527*4882a593Smuzhiyun * if (err != -EPROBE_DEFER)
4528*4882a593Smuzhiyun * dev_err(dev, ...);
4529*4882a593Smuzhiyun * else
4530*4882a593Smuzhiyun * dev_dbg(dev, ...);
4531*4882a593Smuzhiyun * return err;
4532*4882a593Smuzhiyun *
4533*4882a593Smuzhiyun * with::
4534*4882a593Smuzhiyun *
4535*4882a593Smuzhiyun * return dev_err_probe(dev, err, ...);
4536*4882a593Smuzhiyun *
4537*4882a593Smuzhiyun * Returns @err.
4538*4882a593Smuzhiyun *
4539*4882a593Smuzhiyun */
dev_err_probe(const struct device * dev,int err,const char * fmt,...)4540*4882a593Smuzhiyun int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
4541*4882a593Smuzhiyun {
4542*4882a593Smuzhiyun struct va_format vaf;
4543*4882a593Smuzhiyun va_list args;
4544*4882a593Smuzhiyun
4545*4882a593Smuzhiyun va_start(args, fmt);
4546*4882a593Smuzhiyun vaf.fmt = fmt;
4547*4882a593Smuzhiyun vaf.va = &args;
4548*4882a593Smuzhiyun
4549*4882a593Smuzhiyun if (err != -EPROBE_DEFER) {
4550*4882a593Smuzhiyun dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
4551*4882a593Smuzhiyun } else {
4552*4882a593Smuzhiyun device_set_deferred_probe_reason(dev, &vaf);
4553*4882a593Smuzhiyun dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
4554*4882a593Smuzhiyun }
4555*4882a593Smuzhiyun
4556*4882a593Smuzhiyun va_end(args);
4557*4882a593Smuzhiyun
4558*4882a593Smuzhiyun return err;
4559*4882a593Smuzhiyun }
4560*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_err_probe);
4561*4882a593Smuzhiyun
fwnode_is_primary(struct fwnode_handle * fwnode)4562*4882a593Smuzhiyun static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
4563*4882a593Smuzhiyun {
4564*4882a593Smuzhiyun return fwnode && !IS_ERR(fwnode->secondary);
4565*4882a593Smuzhiyun }
4566*4882a593Smuzhiyun
4567*4882a593Smuzhiyun /**
4568*4882a593Smuzhiyun * set_primary_fwnode - Change the primary firmware node of a given device.
4569*4882a593Smuzhiyun * @dev: Device to handle.
4570*4882a593Smuzhiyun * @fwnode: New primary firmware node of the device.
4571*4882a593Smuzhiyun *
4572*4882a593Smuzhiyun * Set the device's firmware node pointer to @fwnode, but if a secondary
4573*4882a593Smuzhiyun * firmware node of the device is present, preserve it.
4574*4882a593Smuzhiyun */
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	struct device *parent = dev->parent;
	struct fwnode_handle *fn = dev->fwnode;

	if (fwnode) {
		/* If the old node was a primary, only its secondary survives. */
		if (fwnode_is_primary(fn))
			fn = fn->secondary;

		if (fn) {
			/* The new primary must not already carry a secondary. */
			WARN_ON(fwnode->secondary);
			fwnode->secondary = fn;
		}
		dev->fwnode = fwnode;
	} else {
		/* Removing the primary: promote the secondary, if any. */
		if (fwnode_is_primary(fn)) {
			dev->fwnode = fn->secondary;
			/*
			 * Don't clear fn->secondary if the node is shared with
			 * the parent — the parent still uses it as a primary.
			 */
			if (!(parent && fn == parent->fwnode))
				fn->secondary = NULL;
		} else {
			dev->fwnode = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);
4600*4882a593Smuzhiyun
4601*4882a593Smuzhiyun /**
4602*4882a593Smuzhiyun * set_secondary_fwnode - Change the secondary firmware node of a given device.
4603*4882a593Smuzhiyun * @dev: Device to handle.
4604*4882a593Smuzhiyun * @fwnode: New secondary firmware node of the device.
4605*4882a593Smuzhiyun *
4606*4882a593Smuzhiyun * If a primary firmware node of the device is present, set its secondary
4607*4882a593Smuzhiyun * pointer to @fwnode. Otherwise, set the device's firmware node pointer to
4608*4882a593Smuzhiyun * @fwnode.
4609*4882a593Smuzhiyun */
set_secondary_fwnode(struct device * dev,struct fwnode_handle * fwnode)4610*4882a593Smuzhiyun void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
4611*4882a593Smuzhiyun {
4612*4882a593Smuzhiyun if (fwnode)
4613*4882a593Smuzhiyun fwnode->secondary = ERR_PTR(-ENODEV);
4614*4882a593Smuzhiyun
4615*4882a593Smuzhiyun if (fwnode_is_primary(dev->fwnode))
4616*4882a593Smuzhiyun dev->fwnode->secondary = fwnode;
4617*4882a593Smuzhiyun else
4618*4882a593Smuzhiyun dev->fwnode = fwnode;
4619*4882a593Smuzhiyun }
4620*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(set_secondary_fwnode);
4621*4882a593Smuzhiyun
4622*4882a593Smuzhiyun /**
4623*4882a593Smuzhiyun * device_set_of_node_from_dev - reuse device-tree node of another device
4624*4882a593Smuzhiyun * @dev: device whose device-tree node is being set
4625*4882a593Smuzhiyun * @dev2: device whose device-tree node is being reused
4626*4882a593Smuzhiyun *
4627*4882a593Smuzhiyun * Takes another reference to the new device-tree node after first dropping
4628*4882a593Smuzhiyun * any reference held to the old node.
4629*4882a593Smuzhiyun */
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
{
	/* Drop the reference to any old node before taking the new one. */
	of_node_put(dev->of_node);
	dev->of_node = of_node_get(dev2->of_node);
	/* Flag the node as shared so it isn't treated as exclusively owned. */
	dev->of_node_reused = true;
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
4637*4882a593Smuzhiyun
/* bus/class/driver_find_device() match callback: compare device names. */
int device_match_name(struct device *dev, const void *name)
{
	return sysfs_streq(dev_name(dev), name);
}
EXPORT_SYMBOL_GPL(device_match_name);
4643*4882a593Smuzhiyun
/* Match callback: compare against a device-tree node pointer. */
int device_match_of_node(struct device *dev, const void *np)
{
	return dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);
4649*4882a593Smuzhiyun
/* Match callback: compare against a firmware node handle. */
int device_match_fwnode(struct device *dev, const void *fwnode)
{
	return dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);
4655*4882a593Smuzhiyun
/* Match callback: compare against a dev_t passed by pointer. */
int device_match_devt(struct device *dev, const void *pdevt)
{
	return dev->devt == *(dev_t *)pdevt;
}
EXPORT_SYMBOL_GPL(device_match_devt);
4661*4882a593Smuzhiyun
/* Match callback: compare against an ACPI companion device. */
int device_match_acpi_dev(struct device *dev, const void *adev)
{
	return ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);
4667*4882a593Smuzhiyun
/* Match callback that accepts every device (iterate-all helper). */
int device_match_any(struct device *dev, const void *unused)
{
	return 1;
}
EXPORT_SYMBOL_GPL(device_match_any);
4673