1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * drivers/base/dd.c - The core device/driver interactions.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * This file contains the (sometimes tricky) code that controls the
6*4882a593Smuzhiyun * interactions between devices and drivers, which primarily includes
7*4882a593Smuzhiyun * driver binding and unbinding.
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * All of this code used to exist in drivers/base/bus.c, but was
10*4882a593Smuzhiyun * relocated to here in the name of compartmentalization (since it wasn't
 * strictly code just for the 'struct bus_type').
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * Copyright (c) 2002-5 Patrick Mochel
14*4882a593Smuzhiyun * Copyright (c) 2002-3 Open Source Development Labs
15*4882a593Smuzhiyun * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
16*4882a593Smuzhiyun * Copyright (c) 2007-2009 Novell Inc.
17*4882a593Smuzhiyun */
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #include <linux/debugfs.h>
20*4882a593Smuzhiyun #include <linux/device.h>
21*4882a593Smuzhiyun #include <linux/delay.h>
22*4882a593Smuzhiyun #include <linux/dma-map-ops.h>
23*4882a593Smuzhiyun #include <linux/init.h>
24*4882a593Smuzhiyun #include <linux/module.h>
25*4882a593Smuzhiyun #include <linux/kthread.h>
26*4882a593Smuzhiyun #include <linux/wait.h>
27*4882a593Smuzhiyun #include <linux/async.h>
28*4882a593Smuzhiyun #include <linux/pm_runtime.h>
29*4882a593Smuzhiyun #include <linux/pinctrl/devinfo.h>
30*4882a593Smuzhiyun #include <linux/slab.h>
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun #include "base.h"
33*4882a593Smuzhiyun #include "power/power.h"
34*4882a593Smuzhiyun
/*
 * Deferred Probe infrastructure.
 *
 * Sometimes driver probe order matters, but the kernel doesn't always have
 * dependency information which means some drivers will get probed before a
 * resource it depends on is available. For example, an SDHCI driver may
 * first need a GPIO line from an i2c GPIO controller before it can be
 * initialized. If a required resource is not available yet, a driver can
 * request probing to be deferred by returning -EPROBE_DEFER from its probe hook
 *
 * Deferred probe maintains two lists of devices, a pending list and an active
 * list. A driver returning -EPROBE_DEFER causes the device to be added to the
 * pending list. A successful driver probe will trigger moving all devices
 * from the pending to the active list so that the workqueue will eventually
 * retry them.
 *
 * The deferred_probe_mutex must be held any time the deferred_probe_*_list
 * of the (struct device*)->p->deferred_probe pointers are manipulated
 */
static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
/* Bumped on every trigger; lets a racing probe detect that it missed one. */
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
/* debugfs "devices_deferred" file, created by deferred_probe_initcall(). */
static struct dentry *deferred_devices;
/* Set once deferred_probe_initcall() has flushed the initial retry passes. */
static bool initcalls_done;

/* Save the async probe drivers' name from kernel cmdline */
#define ASYNC_DRV_NAMES_MAX_LEN 256
static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN];

/*
 * In some cases, like suspend to RAM or hibernation, It might be reasonable
 * to prohibit probing of devices as it could be unsafe.
 * Once defer_all_probes is true all drivers probes will be forcibly deferred.
 */
static bool defer_all_probes;
71*4882a593Smuzhiyun
/*
 * deferred_probe_work_func() - Retry probing devices in the active list.
 */
static void deferred_probe_work_func(struct work_struct *work)
{
	struct device *dev;
	struct device_private *private;
	/*
	 * This block processes every device in the deferred 'active' list.
	 * Each device is removed from the active list and passed to
	 * bus_probe_device() to re-attempt the probe. The loop continues
	 * until every device in the active list is removed and retried.
	 *
	 * Note: Once the device is removed from the list and the mutex is
	 * released, it is possible for the device get freed by another thread
	 * and cause a illegal pointer dereference. This code uses
	 * get/put_device() to ensure the device structure cannot disappear
	 * from under our feet.
	 */
	mutex_lock(&deferred_probe_mutex);
	while (!list_empty(&deferred_probe_active_list)) {
		private = list_first_entry(&deferred_probe_active_list,
					typeof(*dev->p), deferred_probe);
		dev = private->device;
		list_del_init(&private->deferred_probe);

		get_device(dev);

		/* We are retrying now, so the previous defer reason is stale. */
		kfree(dev->p->deferred_probe_reason);
		dev->p->deferred_probe_reason = NULL;

		/*
		 * Drop the mutex while probing each device; the probe path may
		 * manipulate the deferred list
		 */
		mutex_unlock(&deferred_probe_mutex);

		/*
		 * Force the device to the end of the dpm_list since
		 * the PM code assumes that the order we add things to
		 * the list is a good order for suspend but deferred
		 * probe makes that very unsafe.
		 */
		device_pm_move_to_tail(dev);

		dev_dbg(dev, "Retrying from deferred list\n");
		bus_probe_device(dev);
		mutex_lock(&deferred_probe_mutex);

		put_device(dev);
	}
	mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
126*4882a593Smuzhiyun
driver_deferred_probe_add(struct device * dev)127*4882a593Smuzhiyun void driver_deferred_probe_add(struct device *dev)
128*4882a593Smuzhiyun {
129*4882a593Smuzhiyun mutex_lock(&deferred_probe_mutex);
130*4882a593Smuzhiyun if (list_empty(&dev->p->deferred_probe)) {
131*4882a593Smuzhiyun dev_dbg(dev, "Added to deferred list\n");
132*4882a593Smuzhiyun list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
133*4882a593Smuzhiyun }
134*4882a593Smuzhiyun mutex_unlock(&deferred_probe_mutex);
135*4882a593Smuzhiyun }
136*4882a593Smuzhiyun
driver_deferred_probe_del(struct device * dev)137*4882a593Smuzhiyun void driver_deferred_probe_del(struct device *dev)
138*4882a593Smuzhiyun {
139*4882a593Smuzhiyun mutex_lock(&deferred_probe_mutex);
140*4882a593Smuzhiyun if (!list_empty(&dev->p->deferred_probe)) {
141*4882a593Smuzhiyun dev_dbg(dev, "Removed from deferred list\n");
142*4882a593Smuzhiyun list_del_init(&dev->p->deferred_probe);
143*4882a593Smuzhiyun kfree(dev->p->deferred_probe_reason);
144*4882a593Smuzhiyun dev->p->deferred_probe_reason = NULL;
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun mutex_unlock(&deferred_probe_mutex);
147*4882a593Smuzhiyun }
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun static bool driver_deferred_probe_enable = false;
/**
 * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
 *
 * This functions moves all devices from the pending list to the active
 * list and schedules the deferred probe workqueue to process them. It
 * should be called anytime a driver is successfully bound to a device.
 *
 * Note, there is a race condition in multi-threaded probe. In the case where
 * more than one device is probing at the same time, it is possible for one
 * probe to complete successfully while another is about to defer. If the second
 * depends on the first, then it will get put on the pending list after the
 * trigger event has already occurred and will be stuck there.
 *
 * The atomic 'deferred_trigger_count' is used to determine if a successful
 * trigger has occurred in the midst of probing a driver. If the trigger count
 * changes in the midst of a probe, then deferred processing should be triggered
 * again.
 */
static void driver_deferred_probe_trigger(void)
{
	/* No-op until deferred_probe_initcall() enables triggering. */
	if (!driver_deferred_probe_enable)
		return;

	/*
	 * A successful probe means that all the devices in the pending list
	 * should be triggered to be reprobed. Move all the deferred devices
	 * into the active list so they can be retried by the workqueue
	 */
	mutex_lock(&deferred_probe_mutex);
	atomic_inc(&deferred_trigger_count);
	list_splice_tail_init(&deferred_probe_pending_list,
			      &deferred_probe_active_list);
	mutex_unlock(&deferred_probe_mutex);

	/*
	 * Kick the re-probe thread. It may already be scheduled, but it is
	 * safe to kick it again.
	 */
	queue_work(system_unbound_wq, &deferred_probe_work);
}
190*4882a593Smuzhiyun
/**
 * device_block_probing() - Block/defer device's probes
 *
 * It will disable probing of devices and defer their probes instead.
 * really_probe() checks defer_all_probes and force-defers while it is set.
 */
void device_block_probing(void)
{
	defer_all_probes = true;
	/* sync with probes to avoid races. */
	wait_for_device_probe();
}
202*4882a593Smuzhiyun
/**
 * device_unblock_probing() - Unblock/enable device's probes
 *
 * It will restore normal behavior and trigger re-probing of deferred
 * devices.
 */
void device_unblock_probing(void)
{
	defer_all_probes = false;
	/* Retry everything that was force-deferred while probing was blocked. */
	driver_deferred_probe_trigger();
}
214*4882a593Smuzhiyun
/**
 * device_set_deferred_probe_reason() - Set defer probe reason message for device
 * @dev: the pointer to the struct device
 * @vaf: the pointer to va_format structure with message
 *
 * Replaces any previously recorded reason.  On allocation failure the
 * reason is left NULL, which deferred_devs_show() tolerates.
 */
void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf)
{
	const char *drv = dev_driver_string(dev);

	/* The mutex also protects the deferred_probe_reason pointer. */
	mutex_lock(&deferred_probe_mutex);

	kfree(dev->p->deferred_probe_reason);
	dev->p->deferred_probe_reason = kasprintf(GFP_KERNEL, "%s: %pV", drv, vaf);

	mutex_unlock(&deferred_probe_mutex);
}
231*4882a593Smuzhiyun
/*
 * deferred_devs_show() - Show the devices in the deferred probe pending list.
 *
 * Backs the debugfs "devices_deferred" file: one device per line, with the
 * recorded defer reason appended when one exists (a bare newline otherwise).
 */
static int deferred_devs_show(struct seq_file *s, void *data)
{
	struct device_private *curr;

	mutex_lock(&deferred_probe_mutex);

	list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
		seq_printf(s, "%s\t%s", dev_name(curr->device),
			   curr->device->p->deferred_probe_reason ?: "\n");

	mutex_unlock(&deferred_probe_mutex);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(deferred_devs);
250*4882a593Smuzhiyun
/* Seconds before giving up on deferred probes; 0 means no timeout. */
int driver_deferred_probe_timeout;
EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);

/*
 * Parse the "deferred_probe_timeout=" kernel command-line parameter.
 * Values that fail kstrtoint() are silently ignored, keeping the default.
 */
static int __init deferred_probe_timeout_setup(char *str)
{
	int timeout;

	if (!kstrtoint(str, 10, &timeout))
		driver_deferred_probe_timeout = timeout;
	return 1;	/* 1 == parameter handled, per __setup() convention */
}
__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
263*4882a593Smuzhiyun
264*4882a593Smuzhiyun /**
265*4882a593Smuzhiyun * driver_deferred_probe_check_state() - Check deferred probe state
266*4882a593Smuzhiyun * @dev: device to check
267*4882a593Smuzhiyun *
268*4882a593Smuzhiyun * Return:
269*4882a593Smuzhiyun * -ENODEV if initcalls have completed and modules are disabled.
270*4882a593Smuzhiyun * -ETIMEDOUT if the deferred probe timeout was set and has expired
271*4882a593Smuzhiyun * and modules are enabled.
272*4882a593Smuzhiyun * -EPROBE_DEFER in other cases.
273*4882a593Smuzhiyun *
274*4882a593Smuzhiyun * Drivers or subsystems can opt-in to calling this function instead of directly
275*4882a593Smuzhiyun * returning -EPROBE_DEFER.
276*4882a593Smuzhiyun */
driver_deferred_probe_check_state(struct device * dev)277*4882a593Smuzhiyun int driver_deferred_probe_check_state(struct device *dev)
278*4882a593Smuzhiyun {
279*4882a593Smuzhiyun if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
280*4882a593Smuzhiyun dev_warn(dev, "ignoring dependency for device, assuming no driver\n");
281*4882a593Smuzhiyun return -ENODEV;
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun
284*4882a593Smuzhiyun if (!driver_deferred_probe_timeout && initcalls_done) {
285*4882a593Smuzhiyun dev_warn(dev, "deferred probe timeout, ignoring dependency\n");
286*4882a593Smuzhiyun return -ETIMEDOUT;
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun return -EPROBE_DEFER;
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun
/*
 * Delayed work scheduled by deferred_probe_initcall() when a probe timeout
 * was requested.  It zeroes the timeout (so that
 * driver_deferred_probe_check_state() stops returning -EPROBE_DEFER for it),
 * gives every pending device one last retry, then logs whatever is still
 * stuck on the pending list.
 */
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
	struct device_private *p;

	driver_deferred_probe_timeout = 0;
	driver_deferred_probe_trigger();
	/* Wait for the final retry pass to finish before reporting. */
	flush_work(&deferred_probe_work);

	mutex_lock(&deferred_probe_mutex);
	list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
		dev_info(p->device, "deferred probe pending\n");
	mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
306*4882a593Smuzhiyun
/**
 * deferred_probe_initcall() - Enable probing of deferred devices
 *
 * We don't want to get in the way when the bulk of drivers are getting probed.
 * Instead, this initcall makes sure that deferred probing is delayed until
 * late_initcall time.
 */
static int deferred_probe_initcall(void)
{
	/* Expose the pending list via debugfs for diagnostics. */
	deferred_devices = debugfs_create_file("devices_deferred", 0444, NULL,
					       NULL, &deferred_devs_fops);

	driver_deferred_probe_enable = true;
	driver_deferred_probe_trigger();
	/* Sort as many dependencies as possible before exiting initcalls */
	flush_work(&deferred_probe_work);
	initcalls_done = true;

	/*
	 * Trigger deferred probe again, this time we won't defer anything
	 * that is optional
	 */
	driver_deferred_probe_trigger();
	flush_work(&deferred_probe_work);

	/* Arm the give-up timer only when a positive timeout was configured. */
	if (driver_deferred_probe_timeout > 0) {
		schedule_delayed_work(&deferred_probe_timeout_work,
			driver_deferred_probe_timeout * HZ);
	}
	return 0;
}
late_initcall(deferred_probe_initcall);
339*4882a593Smuzhiyun
/* Tear down the debugfs "devices_deferred" file created at late_initcall. */
static void __exit deferred_probe_exit(void)
{
	debugfs_remove_recursive(deferred_devices);
}
__exitcall(deferred_probe_exit);
345*4882a593Smuzhiyun
346*4882a593Smuzhiyun /**
347*4882a593Smuzhiyun * device_is_bound() - Check if device is bound to a driver
348*4882a593Smuzhiyun * @dev: device to check
349*4882a593Smuzhiyun *
350*4882a593Smuzhiyun * Returns true if passed device has already finished probing successfully
351*4882a593Smuzhiyun * against a driver.
352*4882a593Smuzhiyun *
353*4882a593Smuzhiyun * This function must be called with the device lock held.
354*4882a593Smuzhiyun */
device_is_bound(struct device * dev)355*4882a593Smuzhiyun bool device_is_bound(struct device *dev)
356*4882a593Smuzhiyun {
357*4882a593Smuzhiyun return dev->p && klist_node_attached(&dev->p->knode_driver);
358*4882a593Smuzhiyun }
359*4882a593Smuzhiyun
/*
 * driver_bound() - Finalize a successful probe: record the binding, notify
 * interested parties, and kick deferred probing.
 *
 * Called with dev->driver already set.  Warns and bails out if the device
 * is somehow already on a driver's device klist.
 */
static void driver_bound(struct device *dev)
{
	if (device_is_bound(dev)) {
		pr_warn("%s: device %s already bound\n",
			__func__, kobject_name(&dev->kobj));
		return;
	}

	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
		 __func__, dev_name(dev));

	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
	device_links_driver_bound(dev);

	device_pm_check_callbacks(dev);

	/*
	 * Make sure the device is no longer in one of the deferred lists and
	 * kick off retrying all pending devices
	 */
	driver_deferred_probe_del(dev);
	driver_deferred_probe_trigger();

	/* Bus notifier and uevent fire after the binding is fully recorded. */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BOUND_DRIVER, dev);

	kobject_uevent(&dev->kobj, KOBJ_BIND);
}
389*4882a593Smuzhiyun
/*
 * Sysfs "coredump" write handler: any write asks the bound driver to dump
 * device state via its ->coredump() callback.  The attribute is only
 * created (see driver_sysfs_add()) when the driver provides ->coredump,
 * and the device lock keeps the driver from being unbound mid-call.
 */
static ssize_t coredump_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	device_lock(dev);
	dev->driver->coredump(dev);
	device_unlock(dev);

	return count;
}
static DEVICE_ATTR_WO(coredump);
400*4882a593Smuzhiyun
driver_sysfs_add(struct device * dev)401*4882a593Smuzhiyun static int driver_sysfs_add(struct device *dev)
402*4882a593Smuzhiyun {
403*4882a593Smuzhiyun int ret;
404*4882a593Smuzhiyun
405*4882a593Smuzhiyun if (dev->bus)
406*4882a593Smuzhiyun blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
407*4882a593Smuzhiyun BUS_NOTIFY_BIND_DRIVER, dev);
408*4882a593Smuzhiyun
409*4882a593Smuzhiyun ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
410*4882a593Smuzhiyun kobject_name(&dev->kobj));
411*4882a593Smuzhiyun if (ret)
412*4882a593Smuzhiyun goto fail;
413*4882a593Smuzhiyun
414*4882a593Smuzhiyun ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
415*4882a593Smuzhiyun "driver");
416*4882a593Smuzhiyun if (ret)
417*4882a593Smuzhiyun goto rm_dev;
418*4882a593Smuzhiyun
419*4882a593Smuzhiyun if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump ||
420*4882a593Smuzhiyun !device_create_file(dev, &dev_attr_coredump))
421*4882a593Smuzhiyun return 0;
422*4882a593Smuzhiyun
423*4882a593Smuzhiyun sysfs_remove_link(&dev->kobj, "driver");
424*4882a593Smuzhiyun
425*4882a593Smuzhiyun rm_dev:
426*4882a593Smuzhiyun sysfs_remove_link(&dev->driver->p->kobj,
427*4882a593Smuzhiyun kobject_name(&dev->kobj));
428*4882a593Smuzhiyun
429*4882a593Smuzhiyun fail:
430*4882a593Smuzhiyun return ret;
431*4882a593Smuzhiyun }
432*4882a593Smuzhiyun
driver_sysfs_remove(struct device * dev)433*4882a593Smuzhiyun static void driver_sysfs_remove(struct device *dev)
434*4882a593Smuzhiyun {
435*4882a593Smuzhiyun struct device_driver *drv = dev->driver;
436*4882a593Smuzhiyun
437*4882a593Smuzhiyun if (drv) {
438*4882a593Smuzhiyun if (drv->coredump)
439*4882a593Smuzhiyun device_remove_file(dev, &dev_attr_coredump);
440*4882a593Smuzhiyun sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
441*4882a593Smuzhiyun sysfs_remove_link(&dev->kobj, "driver");
442*4882a593Smuzhiyun }
443*4882a593Smuzhiyun }
444*4882a593Smuzhiyun
/**
 * device_bind_driver - bind a driver to one device.
 * @dev: device.
 *
 * Allow manual attachment of a driver to a device.
 * Caller must have already set @dev->driver.
 *
 * Note that this does not modify the bus reference count.
 * Please verify that is accounted for before calling this.
 * (It is ok to call with no other effort from a driver's probe() method.)
 *
 * This function must be called with the device lock held.
 *
 * Returns 0 on success; on sysfs failure the error is returned and the
 * bus's DRIVER_NOT_BOUND notifier is fired instead of binding.
 */
int device_bind_driver(struct device *dev)
{
	int ret;

	ret = driver_sysfs_add(dev);
	if (!ret)
		driver_bound(dev);
	else if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
	return ret;
}
EXPORT_SYMBOL_GPL(device_bind_driver);
471*4882a593Smuzhiyun
/*
 * Number of probes currently in flight.  really_probe() increments it for
 * the duration of a probe and wakes probe_waitqueue on the way out.
 */
static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);

/*
 * Re-queue @dev for deferred probing and, if the global trigger count no
 * longer matches the caller's snapshot (a probe elsewhere succeeded while
 * this one ran), fire the trigger again so @dev isn't left stranded on
 * the pending list.
 */
static void driver_deferred_probe_add_trigger(struct device *dev,
					      int local_trigger_count)
{
	driver_deferred_probe_add(dev);
	/* Did a trigger occur while probing? Need to re-trigger if yes */
	if (local_trigger_count != atomic_read(&deferred_trigger_count))
		driver_deferred_probe_trigger();
}
483*4882a593Smuzhiyun
/*
 * Sysfs "state_synced" read handler: reports dev->state_synced as 0/1.
 * The device lock guards against concurrent updates of the flag.
 */
static ssize_t state_synced_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	bool val;

	device_lock(dev);
	val = dev->state_synced;
	device_unlock(dev);

	return sysfs_emit(buf, "%u\n", val);
}
static DEVICE_ATTR_RO(state_synced);
496*4882a593Smuzhiyun
/*
 * really_probe - attempt to bind @drv to @dev.
 * @dev: device to probe
 * @drv: driver that matched the device
 *
 * Return: 1 if the device was bound, 0 if the probe failed in a way that
 * should not stop other drivers from being tried (the error is logged and
 * swallowed at the bottom), or a negative errno for forced deferral,
 * missing suppliers, or stale devres.
 */
static int really_probe(struct device *dev, struct device_driver *drv)
{
	int ret = -EPROBE_DEFER;
	/* Snapshot so a trigger racing with this probe can be detected later. */
	int local_trigger_count = atomic_read(&deferred_trigger_count);
	/* When set, unwind one successful probe and redo it to exercise remove. */
	bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
			   !drv->suppress_bind_attrs;

	if (defer_all_probes) {
		/*
		 * Value of defer_all_probes can be set only by
		 * device_block_probing() which, in turn, will call
		 * wait_for_device_probe() right after that to avoid any races.
		 */
		dev_dbg(dev, "Driver %s force probe deferral\n", drv->name);
		driver_deferred_probe_add(dev);
		return ret;
	}

	/* Defer if a supplier this device has a link to hasn't probed yet. */
	ret = device_links_check_suppliers(dev);
	if (ret == -EPROBE_DEFER)
		driver_deferred_probe_add_trigger(dev, local_trigger_count);
	if (ret)
		return ret;

	atomic_inc(&probe_count);
	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
		 drv->bus->name, __func__, drv->name, dev_name(dev));
	if (!list_empty(&dev->devres_head)) {
		/* Leftover managed resources mean a previous unbind misbehaved. */
		dev_crit(dev, "Resources present before probing\n");
		ret = -EBUSY;
		goto done;
	}

re_probe:
	dev->driver = drv;

	/* If using pinctrl, bind pins now before probing */
	ret = pinctrl_bind_pins(dev);
	if (ret)
		goto pinctrl_bind_failed;

	if (dev->bus->dma_configure) {
		ret = dev->bus->dma_configure(dev);
		if (ret)
			goto probe_failed;
	}

	ret = driver_sysfs_add(dev);
	if (ret) {
		pr_err("%s: driver_sysfs_add(%s) failed\n",
		       __func__, dev_name(dev));
		goto probe_failed;
	}

	if (dev->pm_domain && dev->pm_domain->activate) {
		ret = dev->pm_domain->activate(dev);
		if (ret)
			goto probe_failed;
	}

	/* The bus's probe callback, when present, takes precedence. */
	if (dev->bus->probe) {
		ret = dev->bus->probe(dev);
		if (ret)
			goto probe_failed;
	} else if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			goto probe_failed;
	}

	ret = device_add_groups(dev, drv->dev_groups);
	if (ret) {
		dev_err(dev, "device_add_groups() failed\n");
		goto dev_groups_failed;
	}

	if (dev_has_sync_state(dev)) {
		ret = device_create_file(dev, &dev_attr_state_synced);
		if (ret) {
			dev_err(dev, "state_synced sysfs add failed\n");
			goto dev_sysfs_state_synced_failed;
		}
	}

	if (test_remove) {
		/*
		 * CONFIG_DEBUG_TEST_DRIVER_REMOVE path: tear the successful
		 * probe back down once (mirroring the error-unwind below),
		 * then jump to re_probe to bind again for real.
		 */
		test_remove = false;

		device_remove_file(dev, &dev_attr_state_synced);
		device_remove_groups(dev, drv->dev_groups);

		if (dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);

		devres_release_all(dev);
		arch_teardown_dma_ops(dev);
		kfree(dev->dma_range_map);
		dev->dma_range_map = NULL;
		driver_sysfs_remove(dev);
		dev->driver = NULL;
		dev_set_drvdata(dev, NULL);
		if (dev->pm_domain && dev->pm_domain->dismiss)
			dev->pm_domain->dismiss(dev);
		pm_runtime_reinit(dev);

		goto re_probe;
	}

	pinctrl_init_done(dev);

	if (dev->pm_domain && dev->pm_domain->sync)
		dev->pm_domain->sync(dev);

	driver_bound(dev);
	ret = 1;	/* 1 == bound successfully */
	pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);
	goto done;

	/* Error unwind: each label undoes everything set up after it. */
dev_sysfs_state_synced_failed:
	device_remove_groups(dev, drv->dev_groups);
dev_groups_failed:
	if (dev->bus->remove)
		dev->bus->remove(dev);
	else if (drv->remove)
		drv->remove(dev);
probe_failed:
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
	device_links_no_driver(dev);
	devres_release_all(dev);
	arch_teardown_dma_ops(dev);
	kfree(dev->dma_range_map);
	dev->dma_range_map = NULL;
	driver_sysfs_remove(dev);
	dev->driver = NULL;
	dev_set_drvdata(dev, NULL);
	if (dev->pm_domain && dev->pm_domain->dismiss)
		dev->pm_domain->dismiss(dev);
	pm_runtime_reinit(dev);
	dev_pm_set_driver_flags(dev, 0);

	switch (ret) {
	case -EPROBE_DEFER:
		/* Driver requested deferred probing */
		dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
		driver_deferred_probe_add_trigger(dev, local_trigger_count);
		break;
	case -ENODEV:
	case -ENXIO:
		pr_debug("%s: probe of %s rejects match %d\n",
			 drv->name, dev_name(dev), ret);
		break;
	default:
		/* driver matched but the probe failed */
		pr_warn("%s: probe of %s failed with error %d\n",
			drv->name, dev_name(dev), ret);
	}
	/*
	 * Ignore errors returned by ->probe so that the next driver can try
	 * its luck.
	 */
	ret = 0;
done:
	atomic_dec(&probe_count);
	wake_up_all(&probe_waitqueue);
	return ret;
}
668*4882a593Smuzhiyun
669*4882a593Smuzhiyun /*
670*4882a593Smuzhiyun * For initcall_debug, show the driver probe time.
671*4882a593Smuzhiyun */
really_probe_debug(struct device * dev,struct device_driver * drv)672*4882a593Smuzhiyun static int really_probe_debug(struct device *dev, struct device_driver *drv)
673*4882a593Smuzhiyun {
674*4882a593Smuzhiyun ktime_t calltime, rettime;
675*4882a593Smuzhiyun int ret;
676*4882a593Smuzhiyun
677*4882a593Smuzhiyun calltime = ktime_get();
678*4882a593Smuzhiyun ret = really_probe(dev, drv);
679*4882a593Smuzhiyun rettime = ktime_get();
680*4882a593Smuzhiyun pr_debug("probe of %s returned %d after %lld usecs\n",
681*4882a593Smuzhiyun dev_name(dev), ret, ktime_us_delta(rettime, calltime));
682*4882a593Smuzhiyun return ret;
683*4882a593Smuzhiyun }
684*4882a593Smuzhiyun
/**
 * driver_probe_done
 * Determine if the probe sequence is finished or not.
 *
 * Returns 0 if no probes are currently in flight (the global probe_count
 * has dropped to zero), or -EBUSY if at least one probe is still running.
 *
 * Should somehow figure out how to use a semaphore, not an atomic variable...
 */
int driver_probe_done(void)
{
	/* Snapshot the counter once so the debug print matches the decision. */
	int local_probe_count = atomic_read(&probe_count);

	pr_debug("%s: probe_count = %d\n", __func__, local_probe_count);
	if (local_probe_count)
		return -EBUSY;
	return 0;
}
700*4882a593Smuzhiyun
/**
 * wait_for_device_probe
 * Wait for device probing to be completed.
 *
 * Ordering matters: the deferred probe workqueue is flushed first so that
 * any re-triggered probes are accounted for in probe_count before we wait
 * on it, and asynchronous probes are synchronized last.
 */
void wait_for_device_probe(void)
{
	/* wait for the deferred probe workqueue to finish */
	flush_work(&deferred_probe_work);

	/* wait for the known devices to complete their probing */
	wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);

	/* flush any probes still running in the async framework */
	async_synchronize_full();
}
715*4882a593Smuzhiyun
/**
 * driver_probe_device - attempt to bind device & driver together
 * @drv: driver to bind a device to
 * @dev: device to try to bind to the driver
 *
 * This function returns -ENODEV if the device is not registered,
 * 1 if the device is bound successfully and 0 otherwise.
 *
 * This function must be called with @dev lock held.  When called for a
 * USB interface, @dev->parent lock must be held as well.
 *
 * If the device has a parent, runtime-resume the parent before driver probing.
 */
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
	int ret = 0;

	if (!device_is_registered(dev))
		return -ENODEV;

	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);

	/* Keep suppliers and the parent runtime-resumed across the probe. */
	pm_runtime_get_suppliers(dev);
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	/* Flush any pending runtime PM transitions for @dev before probing. */
	pm_runtime_barrier(dev);
	if (initcall_debug)
		ret = really_probe_debug(dev, drv);	/* timed variant */
	else
		ret = really_probe(dev, drv);
	pm_request_idle(dev);

	/* Drop the references taken above, in reverse order. */
	if (dev->parent)
		pm_runtime_put(dev->parent);

	pm_runtime_put_suppliers(dev);
	return ret;
}
756*4882a593Smuzhiyun
/*
 * Return true if @drv_name appears in the comma-separated list supplied
 * via the "driver_async_probe=" kernel command line option.
 */
static inline bool cmdline_requested_async_probing(const char *drv_name)
{
	return parse_option_str(async_probe_drv_names, drv_name);
}
761*4882a593Smuzhiyun
762*4882a593Smuzhiyun /* The option format is "driver_async_probe=drv_name1,drv_name2,..." */
save_async_options(char * buf)763*4882a593Smuzhiyun static int __init save_async_options(char *buf)
764*4882a593Smuzhiyun {
765*4882a593Smuzhiyun if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN)
766*4882a593Smuzhiyun pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
767*4882a593Smuzhiyun
768*4882a593Smuzhiyun strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
769*4882a593Smuzhiyun return 1;
770*4882a593Smuzhiyun }
771*4882a593Smuzhiyun __setup("driver_async_probe=", save_async_options);
772*4882a593Smuzhiyun
driver_allows_async_probing(struct device_driver * drv)773*4882a593Smuzhiyun bool driver_allows_async_probing(struct device_driver *drv)
774*4882a593Smuzhiyun {
775*4882a593Smuzhiyun switch (drv->probe_type) {
776*4882a593Smuzhiyun case PROBE_PREFER_ASYNCHRONOUS:
777*4882a593Smuzhiyun return true;
778*4882a593Smuzhiyun
779*4882a593Smuzhiyun case PROBE_FORCE_SYNCHRONOUS:
780*4882a593Smuzhiyun return false;
781*4882a593Smuzhiyun
782*4882a593Smuzhiyun default:
783*4882a593Smuzhiyun if (cmdline_requested_async_probing(drv->name))
784*4882a593Smuzhiyun return true;
785*4882a593Smuzhiyun
786*4882a593Smuzhiyun if (module_requested_async_probing(drv->owner))
787*4882a593Smuzhiyun return true;
788*4882a593Smuzhiyun
789*4882a593Smuzhiyun return false;
790*4882a593Smuzhiyun }
791*4882a593Smuzhiyun }
792*4882a593Smuzhiyun
/* Context shared between __device_attach() and __device_attach_driver(). */
struct device_attach_data {
	struct device *dev;

	/*
	 * Indicates whether we are considering asynchronous probing or
	 * not. Only initial binding after device or driver registration
	 * (including deferral processing) may be done asynchronously, the
	 * rest is always synchronous, as we expect it is being done by
	 * request from userspace.
	 */
	bool check_async;

	/*
	 * Indicates if we are binding synchronous or asynchronous drivers.
	 * When asynchronous probing is enabled we'll execute 2 passes
	 * over drivers: first pass doing synchronous probing and second
	 * doing asynchronous probing (if synchronous did not succeed -
	 * most likely because there was no driver requiring synchronous
	 * probing - and we found asynchronous driver during first pass).
	 * The 2 passes are done because we can't shoot asynchronous
	 * probe for given device and driver from bus_for_each_drv() since
	 * driver pointer is not guaranteed to stay valid once
	 * bus_for_each_drv() iterates to the next driver on the bus.
	 */
	bool want_async;

	/*
	 * We'll set have_async to 'true' if, while scanning for matching
	 * driver, we'll encounter one that requests asynchronous probing.
	 */
	bool have_async;
};
825*4882a593Smuzhiyun
__device_attach_driver(struct device_driver * drv,void * _data)826*4882a593Smuzhiyun static int __device_attach_driver(struct device_driver *drv, void *_data)
827*4882a593Smuzhiyun {
828*4882a593Smuzhiyun struct device_attach_data *data = _data;
829*4882a593Smuzhiyun struct device *dev = data->dev;
830*4882a593Smuzhiyun bool async_allowed;
831*4882a593Smuzhiyun int ret;
832*4882a593Smuzhiyun
833*4882a593Smuzhiyun ret = driver_match_device(drv, dev);
834*4882a593Smuzhiyun if (ret == 0) {
835*4882a593Smuzhiyun /* no match */
836*4882a593Smuzhiyun return 0;
837*4882a593Smuzhiyun } else if (ret == -EPROBE_DEFER) {
838*4882a593Smuzhiyun dev_dbg(dev, "Device match requests probe deferral\n");
839*4882a593Smuzhiyun driver_deferred_probe_add(dev);
840*4882a593Smuzhiyun /*
841*4882a593Smuzhiyun * Device can't match with a driver right now, so don't attempt
842*4882a593Smuzhiyun * to match or bind with other drivers on the bus.
843*4882a593Smuzhiyun */
844*4882a593Smuzhiyun return ret;
845*4882a593Smuzhiyun } else if (ret < 0) {
846*4882a593Smuzhiyun dev_dbg(dev, "Bus failed to match device: %d\n", ret);
847*4882a593Smuzhiyun return ret;
848*4882a593Smuzhiyun } /* ret > 0 means positive match */
849*4882a593Smuzhiyun
850*4882a593Smuzhiyun async_allowed = driver_allows_async_probing(drv);
851*4882a593Smuzhiyun
852*4882a593Smuzhiyun if (async_allowed)
853*4882a593Smuzhiyun data->have_async = true;
854*4882a593Smuzhiyun
855*4882a593Smuzhiyun if (data->check_async && async_allowed != data->want_async)
856*4882a593Smuzhiyun return 0;
857*4882a593Smuzhiyun
858*4882a593Smuzhiyun return driver_probe_device(drv, dev);
859*4882a593Smuzhiyun }
860*4882a593Smuzhiyun
/*
 * Asynchronous companion to __device_attach(): runs the async-capable
 * driver pass for @_dev from the async framework, then drops the device
 * reference taken when this probe was scheduled.
 */
static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
{
	struct device *dev = _dev;
	struct device_attach_data data = {
		.dev = dev,
		.check_async = true,
		.want_async = true,	/* second pass: async drivers only */
	};

	device_lock(dev);

	/*
	 * Check if device has already been removed or claimed. This may
	 * happen with driver loading, device discovery/registration,
	 * and deferred probe processing happens all at once with
	 * multiple threads.
	 */
	if (dev->p->dead || dev->driver)
		goto out_unlock;

	/* Keep the parent runtime-resumed for the duration of the probe. */
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
	dev_dbg(dev, "async probe completed\n");

	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);
out_unlock:
	device_unlock(dev);

	/* Balances the get_device() done when this probe was scheduled. */
	put_device(dev);
}
896*4882a593Smuzhiyun
/*
 * Core of device_attach()/device_initial_probe().
 *
 * Returns 1 if @dev ended up bound to a driver, 0 if no driver was bound
 * (including the case where an asynchronous probe was scheduled instead),
 * or a negative errno propagated from the bus iteration.  When
 * @allow_async is set and only async-capable drivers matched, the real
 * probe happens later in __device_attach_async_helper().
 */
static int __device_attach(struct device *dev, bool allow_async)
{
	int ret = 0;
	bool async = false;

	device_lock(dev);
	if (dev->p->dead) {
		/* Device is being removed; don't bind anything to it. */
		goto out_unlock;
	} else if (dev->driver) {
		/* A driver was already chosen; finish or report the binding. */
		if (device_is_bound(dev)) {
			ret = 1;
			goto out_unlock;
		}
		ret = device_bind_driver(dev);
		if (ret == 0)
			ret = 1;
		else {
			dev->driver = NULL;
			ret = 0;
		}
	} else {
		struct device_attach_data data = {
			.dev = dev,
			.check_async = allow_async,
			.want_async = false,	/* first pass is synchronous */
		};

		if (dev->parent)
			pm_runtime_get_sync(dev->parent);

		ret = bus_for_each_drv(dev->bus, NULL, &data,
				       __device_attach_driver);
		if (!ret && allow_async && data.have_async) {
			/*
			 * If we could not find appropriate driver
			 * synchronously and we are allowed to do
			 * async probes and there are drivers that
			 * want to probe asynchronously, we'll
			 * try them.
			 */
			dev_dbg(dev, "scheduling asynchronous probe\n");
			/* Reference is dropped by the async helper. */
			get_device(dev);
			async = true;
		} else {
			pm_request_idle(dev);
		}

		if (dev->parent)
			pm_runtime_put(dev->parent);
	}
out_unlock:
	device_unlock(dev);
	/* Schedule outside the device lock; the helper re-takes it. */
	if (async)
		async_schedule_dev(__device_attach_async_helper, dev);
	return ret;
}
953*4882a593Smuzhiyun
/**
 * device_attach - try to attach device to a driver.
 * @dev: device.
 *
 * Walk the list of drivers that the bus has and call
 * driver_probe_device() for each pair. If a compatible
 * pair is found, break out and return.
 *
 * Returns 1 if the device was bound to a driver;
 * 0 if no matching driver was found;
 * -ENODEV if the device is not registered.
 *
 * When called for a USB interface, @dev->parent lock must be held.
 *
 * Asynchronous probing is never used here; see device_initial_probe().
 */
int device_attach(struct device *dev)
{
	return __device_attach(dev, false);
}
EXPORT_SYMBOL_GPL(device_attach);
973*4882a593Smuzhiyun
/*
 * Like device_attach(), but used for the initial binding after device
 * registration (including deferred probe processing), so asynchronous
 * probing is allowed.  The result is intentionally ignored.
 */
void device_initial_probe(struct device *dev)
{
	__device_attach(dev, true);
}
978*4882a593Smuzhiyun
/*
 * __device_driver_lock - acquire locks needed to manipulate dev->drv
 * @dev: Device we will update driver info for
 * @parent: Parent device. Needed if the bus requires parent lock
 *
 * This function will take the required locks for manipulating dev->drv.
 * Normally this will just be the @dev lock, but when called for a USB
 * interface, @parent lock will be held as well.
 *
 * Lock order is parent before child; __device_driver_unlock() releases
 * in the opposite order.
 */
static void __device_driver_lock(struct device *dev, struct device *parent)
{
	if (parent && dev->bus->need_parent_lock)
		device_lock(parent);
	device_lock(dev);
}
994*4882a593Smuzhiyun
/*
 * __device_driver_unlock - release locks needed to manipulate dev->drv
 * @dev: Device we will update driver info for
 * @parent: Parent device. Needed if the bus requires parent lock
 *
 * This function will release the required locks for manipulating dev->drv.
 * Normally this will just be the @dev lock, but when called for a
 * USB interface, @parent lock will be released as well.
 */
static void __device_driver_unlock(struct device *dev, struct device *parent)
{
	/* Release in reverse of the order taken by __device_driver_lock(). */
	device_unlock(dev);
	if (parent && dev->bus->need_parent_lock)
		device_unlock(parent);
}
1010*4882a593Smuzhiyun
1011*4882a593Smuzhiyun /**
1012*4882a593Smuzhiyun * device_driver_attach - attach a specific driver to a specific device
1013*4882a593Smuzhiyun * @drv: Driver to attach
1014*4882a593Smuzhiyun * @dev: Device to attach it to
1015*4882a593Smuzhiyun *
1016*4882a593Smuzhiyun * Manually attach driver to a device. Will acquire both @dev lock and
1017*4882a593Smuzhiyun * @dev->parent lock if needed.
1018*4882a593Smuzhiyun */
device_driver_attach(struct device_driver * drv,struct device * dev)1019*4882a593Smuzhiyun int device_driver_attach(struct device_driver *drv, struct device *dev)
1020*4882a593Smuzhiyun {
1021*4882a593Smuzhiyun int ret = 0;
1022*4882a593Smuzhiyun
1023*4882a593Smuzhiyun __device_driver_lock(dev, dev->parent);
1024*4882a593Smuzhiyun
1025*4882a593Smuzhiyun /*
1026*4882a593Smuzhiyun * If device has been removed or someone has already successfully
1027*4882a593Smuzhiyun * bound a driver before us just skip the driver probe call.
1028*4882a593Smuzhiyun */
1029*4882a593Smuzhiyun if (!dev->p->dead && !dev->driver)
1030*4882a593Smuzhiyun ret = driver_probe_device(drv, dev);
1031*4882a593Smuzhiyun
1032*4882a593Smuzhiyun __device_driver_unlock(dev, dev->parent);
1033*4882a593Smuzhiyun
1034*4882a593Smuzhiyun return ret;
1035*4882a593Smuzhiyun }
1036*4882a593Smuzhiyun
/*
 * Async counterpart of __driver_attach(): probe the driver stashed in
 * dev->p->async_driver against @_dev, then drop the device reference
 * taken when the probe was scheduled.
 */
static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
{
	struct device *dev = _dev;
	struct device_driver *drv;
	int ret = 0;

	__device_driver_lock(dev, dev->parent);

	drv = dev->p->async_driver;

	/*
	 * If device has been removed or someone has already successfully
	 * bound a driver before us just skip the driver probe call.
	 */
	if (!dev->p->dead && !dev->driver)
		ret = driver_probe_device(drv, dev);

	__device_driver_unlock(dev, dev->parent);

	dev_dbg(dev, "driver %s async attach completed: %d\n", drv->name, ret);

	/* Balances the get_device() done in __driver_attach(). */
	put_device(dev);
}
1060*4882a593Smuzhiyun
/*
 * Per-device callback for bus_for_each_dev() in driver_attach(): try to
 * bind @data (the driver) to @dev, either synchronously or by scheduling
 * an asynchronous probe.
 */
static int __driver_attach(struct device *dev, void *data)
{
	struct device_driver *drv = data;
	bool async = false;
	int ret;

	/*
	 * Lock device and try to bind to it. We drop the error
	 * here and always return 0, because we need to keep trying
	 * to bind to devices and some drivers will return an error
	 * simply if it didn't support the device.
	 *
	 * driver_probe_device() will spit a warning if there
	 * is an error.
	 */

	ret = driver_match_device(drv, dev);
	if (ret == 0) {
		/* no match */
		return 0;
	} else if (ret == -EPROBE_DEFER) {
		dev_dbg(dev, "Device match requests probe deferral\n");
		driver_deferred_probe_add(dev);
		/*
		 * Driver could not match with device, but may match with
		 * another device on the bus.
		 */
		return 0;
	} else if (ret < 0) {
		/* NOTE(review): a negative match error aborts the whole
		 * bus_for_each_dev() walk for this driver — confirm this
		 * is the intended behavior vs. skipping just this device. */
		dev_dbg(dev, "Bus failed to match device: %d\n", ret);
		return ret;
	} /* ret > 0 means positive match */

	if (driver_allows_async_probing(drv)) {
		/*
		 * Instead of probing the device synchronously we will
		 * probe it asynchronously to allow for more parallelism.
		 *
		 * We only take the device lock here in order to guarantee
		 * that the dev->driver and async_driver fields are protected
		 */
		dev_dbg(dev, "probing driver %s asynchronously\n", drv->name);
		device_lock(dev);
		if (!dev->driver) {
			/* Reference is dropped by the async helper. */
			get_device(dev);
			dev->p->async_driver = drv;
			async = true;
		}
		device_unlock(dev);
		/* Schedule outside the lock; the helper re-takes it. */
		if (async)
			async_schedule_dev(__driver_attach_async_helper, dev);
		return 0;
	}

	device_driver_attach(drv, dev);

	return 0;
}
1119*4882a593Smuzhiyun
/**
 * driver_attach - try to bind driver to devices.
 * @drv: driver.
 *
 * Walk the list of devices that the bus has on it and try to
 * match the driver with each one. If driver_probe_device()
 * returns 0 and the @dev->driver is set, we've found a
 * compatible pair.
 *
 * Binding may happen asynchronously; see __driver_attach().
 */
int driver_attach(struct device_driver *drv)
{
	return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
}
EXPORT_SYMBOL_GPL(driver_attach);
1134*4882a593Smuzhiyun
/*
 * __device_release_driver() must be called with @dev lock held.
 * When called for a USB interface, @dev->parent lock must be held as well.
 */
static void __device_release_driver(struct device *dev, struct device *parent)
{
	struct device_driver *drv;

	drv = dev->driver;
	if (drv) {
		/* Keep the device runtime-resumed during the teardown. */
		pm_runtime_get_sync(dev);

		/*
		 * Consumers linked to this device must be unbound first.
		 * The locks are dropped while doing so because unbinding
		 * the consumers takes their locks too.
		 */
		while (device_links_busy(dev)) {
			__device_driver_unlock(dev, parent);

			device_links_unbind_consumers(dev);

			__device_driver_lock(dev, parent);
			/*
			 * A concurrent invocation of the same function might
			 * have released the driver successfully while this one
			 * was waiting, so check for that.
			 */
			if (dev->driver != drv) {
				pm_runtime_put(dev);
				return;
			}
		}

		driver_sysfs_remove(dev);

		/* Tell bus listeners the driver is about to be unbound. */
		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBIND_DRIVER,
						     dev);

		pm_runtime_put_sync(dev);

		device_remove_file(dev, &dev_attr_state_synced);
		device_remove_groups(dev, drv->dev_groups);

		/* The bus-level remove callback takes precedence. */
		if (dev->bus && dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);

		device_links_driver_cleanup(dev);

		/* Release managed resources and per-binding DMA state. */
		devres_release_all(dev);
		arch_teardown_dma_ops(dev);
		kfree(dev->dma_range_map);
		dev->dma_range_map = NULL;
		dev->driver = NULL;
		dev_set_drvdata(dev, NULL);
		if (dev->pm_domain && dev->pm_domain->dismiss)
			dev->pm_domain->dismiss(dev);
		pm_runtime_reinit(dev);
		dev_pm_set_driver_flags(dev, 0);

		klist_remove(&dev->p->knode_driver);
		device_pm_check_callbacks(dev);
		/* ...and that the unbind is now complete. */
		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBOUND_DRIVER,
						     dev);

		kobject_uevent(&dev->kobj, KOBJ_UNBIND);
	}
}
1204*4882a593Smuzhiyun
/*
 * Unbind @dev from its driver under the proper locks.  A NULL @drv means
 * "whatever driver is currently bound"; otherwise the unbind only happens
 * when @drv is the driver actually bound to @dev.
 */
void device_release_driver_internal(struct device *dev,
				    struct device_driver *drv,
				    struct device *parent)
{
	__device_driver_lock(dev, parent);

	if (!drv || drv == dev->driver)
		__device_release_driver(dev, parent);

	__device_driver_unlock(dev, parent);
}
1216*4882a593Smuzhiyun
/**
 * device_release_driver - manually detach device from driver.
 * @dev: device.
 *
 * Manually detach device from driver.
 * When called for a USB interface, @dev->parent lock must be held.
 *
 * If this function is to be called with @dev->parent lock held, ensure that
 * the device's consumers are unbound in advance or that their locks can be
 * acquired under the @dev->parent lock.
 */
void device_release_driver(struct device *dev)
{
	/*
	 * If anyone calls device_release_driver() recursively from
	 * within their ->remove callback for the same device, they
	 * will deadlock right here.
	 */
	device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
1238*4882a593Smuzhiyun
/**
 * device_driver_detach - detach driver from a specific device
 * @dev: device to detach driver from
 *
 * Detach driver from device. Will acquire both @dev lock and @dev->parent
 * lock if needed.
 */
void device_driver_detach(struct device *dev)
{
	/* NULL driver means "unbind whatever is currently bound". */
	device_release_driver_internal(dev, NULL, dev->parent);
}
1250*4882a593Smuzhiyun
/**
 * driver_detach - detach driver from all devices it controls.
 * @drv: driver.
 */
void driver_detach(struct device_driver *drv)
{
	struct device_private *dev_prv;
	struct device *dev;

	/* Make sure no asynchronous probes of this driver are in flight. */
	if (driver_allows_async_probing(drv))
		async_synchronize_full();

	/*
	 * Repeatedly detach the last device on the driver's klist.  A
	 * device reference is taken under the klist lock so the device
	 * cannot go away before device_release_driver_internal() runs.
	 */
	for (;;) {
		spin_lock(&drv->p->klist_devices.k_lock);
		if (list_empty(&drv->p->klist_devices.k_list)) {
			spin_unlock(&drv->p->klist_devices.k_lock);
			break;
		}
		dev_prv = list_last_entry(&drv->p->klist_devices.k_list,
					  struct device_private,
					  knode_driver.n_node);
		dev = dev_prv->device;
		get_device(dev);
		spin_unlock(&drv->p->klist_devices.k_lock);
		device_release_driver_internal(dev, drv, dev->parent);
		put_device(dev);
	}
}
1279