• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1  /*
2   * drivers/base/dd.c - The core device/driver interactions.
3   *
4   * This file contains the (sometimes tricky) code that controls the
5   * interactions between devices and drivers, which primarily includes
6   * driver binding and unbinding.
7   *
8   * All of this code used to exist in drivers/base/bus.c, but was
9   * relocated to here in the name of compartmentalization (since it wasn't
10   * strictly code just for the 'struct bus_type'.
11   *
12   * Copyright (c) 2002-5 Patrick Mochel
13   * Copyright (c) 2002-3 Open Source Development Labs
14   * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
15   * Copyright (c) 2007-2009 Novell Inc.
16   *
17   * This file is released under the GPLv2
18   */
19  
20  #include <linux/device.h>
21  #include <linux/delay.h>
22  #include <linux/module.h>
23  #include <linux/kthread.h>
24  #include <linux/wait.h>
25  #include <linux/async.h>
26  #include <linux/pm_runtime.h>
27  #include <linux/pinctrl/devinfo.h>
28  
29  #include "base.h"
30  #include "power/power.h"
31  
32  /*
33   * Deferred Probe infrastructure.
34   *
35   * Sometimes driver probe order matters, but the kernel doesn't always have
36   * dependency information which means some drivers will get probed before a
37   * resource it depends on is available.  For example, an SDHCI driver may
38   * first need a GPIO line from an i2c GPIO controller before it can be
39   * initialized.  If a required resource is not available yet, a driver can
40   * request probing to be deferred by returning -EPROBE_DEFER from its probe hook
41   *
42   * Deferred probe maintains two lists of devices, a pending list and an active
43   * list.  A driver returning -EPROBE_DEFER causes the device to be added to the
44   * pending list.  A successful driver probe will trigger moving all devices
45   * from the pending to the active list so that the workqueue will eventually
46   * retry them.
47   *
48   * The deferred_probe_mutex must be held any time the deferred_probe_*_list
49   * of the (struct device*)->p->deferred_probe pointers are manipulated
50   */
/* Serializes all access to the two deferred-probe lists below and to the
 * (struct device*)->p->deferred_probe list nodes. */
static DEFINE_MUTEX(deferred_probe_mutex);
/* Devices whose probe returned -EPROBE_DEFER, waiting for a trigger. */
static LIST_HEAD(deferred_probe_pending_list);
/* Devices moved off the pending list by a trigger; the workqueue retries these. */
static LIST_HEAD(deferred_probe_active_list);
/* Single-threaded workqueue running deferred_probe_work_func(); created at late_initcall. */
static struct workqueue_struct *deferred_wq;
/* Bumped on every trigger so a probe can detect a trigger racing with it. */
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
56  
57  /*
58   * deferred_probe_work_func() - Retry probing devices in the active list.
59   */
static void deferred_probe_work_func(struct work_struct *work)
{
	struct device *dev;
	struct device_private *private;
	/*
	 * This block processes every device in the deferred 'active' list.
	 * Each device is removed from the active list and passed to
	 * bus_probe_device() to re-attempt the probe.  The loop continues
	 * until every device in the active list is removed and retried.
	 *
	 * Note: Once the device is removed from the list and the mutex is
	 * released, it is possible for the device to be freed by another
	 * thread and cause an illegal pointer dereference.  This code uses
	 * get/put_device() to ensure the device structure cannot disappear
	 * from under our feet.
	 */
	mutex_lock(&deferred_probe_mutex);
	while (!list_empty(&deferred_probe_active_list)) {
		/* The list links device_private nodes; map back to the device. */
		private = list_first_entry(&deferred_probe_active_list,
					typeof(*dev->p), deferred_probe);
		dev = private->device;
		/* list_del_init() keeps "queued?" testable via list_empty(). */
		list_del_init(&private->deferred_probe);

		get_device(dev);

		/*
		 * Drop the mutex while probing each device; the probe path may
		 * manipulate the deferred list
		 */
		mutex_unlock(&deferred_probe_mutex);

		/*
		 * Force the device to the end of the dpm_list since
		 * the PM code assumes that the order we add things to
		 * the list is a good order for suspend but deferred
		 * probe makes that very unsafe.
		 */
		device_pm_lock();
		device_pm_move_last(dev);
		device_pm_unlock();

		dev_dbg(dev, "Retrying from deferred list\n");
		bus_probe_device(dev);

		/* Retake the mutex before the next list_empty() check. */
		mutex_lock(&deferred_probe_mutex);

		put_device(dev);
	}
	mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
111  
driver_deferred_probe_add(struct device * dev)112  static void driver_deferred_probe_add(struct device *dev)
113  {
114  	mutex_lock(&deferred_probe_mutex);
115  	if (list_empty(&dev->p->deferred_probe)) {
116  		dev_dbg(dev, "Added to deferred list\n");
117  		list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
118  	}
119  	mutex_unlock(&deferred_probe_mutex);
120  }
121  
driver_deferred_probe_del(struct device * dev)122  void driver_deferred_probe_del(struct device *dev)
123  {
124  	mutex_lock(&deferred_probe_mutex);
125  	if (!list_empty(&dev->p->deferred_probe)) {
126  		dev_dbg(dev, "Removed from deferred list\n");
127  		list_del_init(&dev->p->deferred_probe);
128  	}
129  	mutex_unlock(&deferred_probe_mutex);
130  }
131  
132  static bool driver_deferred_probe_enable = false;
133  /**
134   * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
135   *
136   * This functions moves all devices from the pending list to the active
137   * list and schedules the deferred probe workqueue to process them.  It
138   * should be called anytime a driver is successfully bound to a device.
139   *
140   * Note, there is a race condition in multi-threaded probe. In the case where
141   * more than one device is probing at the same time, it is possible for one
142   * probe to complete successfully while another is about to defer. If the second
143   * depends on the first, then it will get put on the pending list after the
144   * trigger event has already occurred and will be stuck there.
145   *
146   * The atomic 'deferred_trigger_count' is used to determine if a successful
147   * trigger has occurred in the midst of probing a driver. If the trigger count
148   * changes in the midst of a probe, then deferred processing should be triggered
149   * again.
150   */
driver_deferred_probe_trigger(void)151  static void driver_deferred_probe_trigger(void)
152  {
153  	if (!driver_deferred_probe_enable)
154  		return;
155  
156  	/*
157  	 * A successful probe means that all the devices in the pending list
158  	 * should be triggered to be reprobed.  Move all the deferred devices
159  	 * into the active list so they can be retried by the workqueue
160  	 */
161  	mutex_lock(&deferred_probe_mutex);
162  	atomic_inc(&deferred_trigger_count);
163  	list_splice_tail_init(&deferred_probe_pending_list,
164  			      &deferred_probe_active_list);
165  	mutex_unlock(&deferred_probe_mutex);
166  
167  	/*
168  	 * Kick the re-probe thread.  It may already be scheduled, but it is
169  	 * safe to kick it again.
170  	 */
171  	queue_work(deferred_wq, &deferred_probe_work);
172  }
173  
174  /**
175   * deferred_probe_initcall() - Enable probing of deferred devices
176   *
177   * We don't want to get in the way when the bulk of drivers are getting probed.
178   * Instead, this initcall makes sure that deferred probing is delayed until
179   * late_initcall time.
180   */
static int deferred_probe_initcall(void)
{
	deferred_wq = create_singlethread_workqueue("deferwq");
	if (WARN_ON(!deferred_wq))
		return -ENOMEM;

	/* From here on, driver_deferred_probe_trigger() actually queues work. */
	driver_deferred_probe_enable = true;
	driver_deferred_probe_trigger();
	/* Sort as many dependencies as possible before exiting initcalls */
	flush_workqueue(deferred_wq);
	return 0;
}
late_initcall(deferred_probe_initcall);
194  
driver_bound(struct device * dev)195  static void driver_bound(struct device *dev)
196  {
197  	if (klist_node_attached(&dev->p->knode_driver)) {
198  		printk(KERN_WARNING "%s: device %s already bound\n",
199  			__func__, kobject_name(&dev->kobj));
200  		return;
201  	}
202  
203  	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
204  		 __func__, dev_name(dev));
205  
206  	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
207  
208  	device_pm_check_callbacks(dev);
209  
210  	/*
211  	 * Make sure the device is no longer in one of the deferred lists and
212  	 * kick off retrying all pending devices
213  	 */
214  	driver_deferred_probe_del(dev);
215  	driver_deferred_probe_trigger();
216  
217  	if (dev->bus)
218  		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
219  					     BUS_NOTIFY_BOUND_DRIVER, dev);
220  }
221  
driver_sysfs_add(struct device * dev)222  static int driver_sysfs_add(struct device *dev)
223  {
224  	int ret;
225  
226  	if (dev->bus)
227  		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
228  					     BUS_NOTIFY_BIND_DRIVER, dev);
229  
230  	ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
231  			  kobject_name(&dev->kobj));
232  	if (ret == 0) {
233  		ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
234  					"driver");
235  		if (ret)
236  			sysfs_remove_link(&dev->driver->p->kobj,
237  					kobject_name(&dev->kobj));
238  	}
239  	return ret;
240  }
241  
driver_sysfs_remove(struct device * dev)242  static void driver_sysfs_remove(struct device *dev)
243  {
244  	struct device_driver *drv = dev->driver;
245  
246  	if (drv) {
247  		sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
248  		sysfs_remove_link(&dev->kobj, "driver");
249  	}
250  }
251  
252  /**
253   * device_bind_driver - bind a driver to one device.
254   * @dev: device.
255   *
256   * Allow manual attachment of a driver to a device.
257   * Caller must have already set @dev->driver.
258   *
259   * Note that this does not modify the bus reference count
260   * nor take the bus's rwsem. Please verify those are accounted
261   * for before calling this. (It is ok to call with no other effort
262   * from a driver's probe() method.)
263   *
264   * This function must be called with the device lock held.
265   */
int device_bind_driver(struct device *dev)
{
	int ret = driver_sysfs_add(dev);

	if (ret)
		return ret;

	/* Links created; complete the binding bookkeeping. */
	driver_bound(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(device_bind_driver);
276  
/* Number of really_probe() calls in flight; gates driver_probe_done(). */
static atomic_t probe_count = ATOMIC_INIT(0);
/* Woken at the end of every really_probe(); see wait_for_device_probe(). */
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
279  
static int really_probe(struct device *dev, struct device_driver *drv)
{
	int ret = 0;
	/*
	 * Snapshot the trigger count so the -EPROBE_DEFER path below can tell
	 * whether a deferred-probe trigger fired while this probe was running.
	 */
	int local_trigger_count = atomic_read(&deferred_trigger_count);

	/* probe_count is what driver_probe_done()/wait_for_device_probe() watch. */
	atomic_inc(&probe_count);
	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
		 drv->bus->name, __func__, drv->name, dev_name(dev));
	/* Leftover devres entries mean a previous unbind did not clean up. */
	if (!list_empty(&dev->devres_head)) {
		dev_crit(dev, "Resources present before probing\n");
		ret = -EBUSY;
		goto done;
	}

	dev->driver = drv;

	/* If using pinctrl, bind pins now before probing */
	ret = pinctrl_bind_pins(dev);
	if (ret)
		goto probe_failed;

	if (driver_sysfs_add(dev)) {
		/*
		 * NOTE(review): ret is still 0 here (from pinctrl_bind_pins),
		 * so the switch below reports "failed with error 0" for sysfs
		 * failures -- confirm this is intended.
		 */
		printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
			__func__, dev_name(dev));
		goto probe_failed;
	}

	/* Let the PM domain power up / prepare the device before probing. */
	if (dev->pm_domain && dev->pm_domain->activate) {
		ret = dev->pm_domain->activate(dev);
		if (ret)
			goto probe_failed;
	}

	/* The bus' probe hook, when present, takes precedence over the driver's. */
	if (dev->bus->probe) {
		ret = dev->bus->probe(dev);
		if (ret)
			goto probe_failed;
	} else if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			goto probe_failed;
	}

	pinctrl_init_done(dev);

	if (dev->pm_domain && dev->pm_domain->sync)
		dev->pm_domain->sync(dev);

	driver_bound(dev);
	/* 1 signals "device bound" to driver_probe_device()'s callers. */
	ret = 1;
	pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);
	goto done;

probe_failed:
	/* Unwind everything the partial probe may have set up. */
	devres_release_all(dev);
	driver_sysfs_remove(dev);
	dev->driver = NULL;
	dev_set_drvdata(dev, NULL);
	if (dev->pm_domain && dev->pm_domain->dismiss)
		dev->pm_domain->dismiss(dev);

	switch (ret) {
	case -EPROBE_DEFER:
		/* Driver requested deferred probing */
		dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
		driver_deferred_probe_add(dev);
		/* Did a trigger occur while probing? Need to re-trigger if yes */
		if (local_trigger_count != atomic_read(&deferred_trigger_count))
			driver_deferred_probe_trigger();
		break;
	case -ENODEV:
	case -ENXIO:
		pr_debug("%s: probe of %s rejects match %d\n",
			 drv->name, dev_name(dev), ret);
		break;
	default:
		/* driver matched but the probe failed */
		printk(KERN_WARNING
		       "%s: probe of %s failed with error %d\n",
		       drv->name, dev_name(dev), ret);
	}
	/*
	 * Ignore errors returned by ->probe so that the next driver can try
	 * its luck.
	 */
	ret = 0;
done:
	atomic_dec(&probe_count);
	/* Let wait_for_device_probe() waiters re-check probe_count. */
	wake_up_all(&probe_waitqueue);
	return ret;
}
372  
373  /**
374   * driver_probe_done
375   * Determine if the probe sequence is finished or not.
376   *
377   * Should somehow figure out how to use a semaphore, not an atomic variable...
378   */
driver_probe_done(void)379  int driver_probe_done(void)
380  {
381  	pr_debug("%s: probe_count = %d\n", __func__,
382  		 atomic_read(&probe_count));
383  	if (atomic_read(&probe_count))
384  		return -EBUSY;
385  	return 0;
386  }
387  
388  /**
389   * wait_for_device_probe
390   * Wait for device probing to be completed.
391   */
void wait_for_device_probe(void)
{
	/* wait for the known devices to complete their probing */
	wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
	/* also drain probes that were kicked off via async_schedule() */
	async_synchronize_full();
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
399  
400  /**
401   * driver_probe_device - attempt to bind device & driver together
402   * @drv: driver to bind a device to
403   * @dev: device to try to bind to the driver
404   *
405   * This function returns -ENODEV if the device is not registered,
406   * 1 if the device is bound successfully and 0 otherwise.
407   *
408   * This function must be called with @dev lock held.  When called for a
409   * USB interface, @dev->parent lock must be held as well.
410   *
411   * If the device has a parent, runtime-resume the parent before driver probing.
412   */
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
	int ret = 0;

	if (!device_is_registered(dev))
		return -ENODEV;

	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);

	/* Runtime-resume the parent and keep it active across the probe. */
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	/* Flush pending runtime-PM operations on the device itself first. */
	pm_runtime_barrier(dev);
	ret = really_probe(dev, drv);
	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);

	return ret;
}
435  
driver_allows_async_probing(struct device_driver * drv)436  bool driver_allows_async_probing(struct device_driver *drv)
437  {
438  	switch (drv->probe_type) {
439  	case PROBE_PREFER_ASYNCHRONOUS:
440  		return true;
441  
442  	case PROBE_FORCE_SYNCHRONOUS:
443  		return false;
444  
445  	default:
446  		if (module_requested_async_probing(drv->owner))
447  			return true;
448  
449  		return false;
450  	}
451  }
452  
/* Bookkeeping passed through bus_for_each_drv() by __device_attach(). */
struct device_attach_data {
	struct device *dev;

	/*
	 * Indicates whether we are considering asynchronous probing or
	 * not. Only initial binding after device or driver registration
	 * (including deferral processing) may be done asynchronously, the
	 * rest is always synchronous, as we expect it is being done by
	 * request from userspace.
	 */
	bool check_async;

	/*
	 * Indicates if we are binding synchronous or asynchronous drivers.
	 * When asynchronous probing is enabled we'll execute 2 passes
	 * over drivers: first pass doing synchronous probing and second
	 * doing asynchronous probing (if synchronous did not succeed -
	 * most likely because there was no driver requiring synchronous
	 * probing - and we found asynchronous driver during first pass).
	 * The 2 passes are done because we can't shoot asynchronous
	 * probe for given device and driver from bus_for_each_drv() since
	 * driver pointer is not guaranteed to stay valid once
	 * bus_for_each_drv() iterates to the next driver on the bus.
	 */
	bool want_async;

	/*
	 * We'll set have_async to 'true' if, while scanning for matching
	 * driver, we'll encounter one that requests asynchronous probing.
	 */
	bool have_async;
};
485  
__device_attach_driver(struct device_driver * drv,void * _data)486  static int __device_attach_driver(struct device_driver *drv, void *_data)
487  {
488  	struct device_attach_data *data = _data;
489  	struct device *dev = data->dev;
490  	bool async_allowed;
491  
492  	/*
493  	 * Check if device has already been claimed. This may
494  	 * happen with driver loading, device discovery/registration,
495  	 * and deferred probe processing happens all at once with
496  	 * multiple threads.
497  	 */
498  	if (dev->driver)
499  		return -EBUSY;
500  
501  	if (!driver_match_device(drv, dev))
502  		return 0;
503  
504  	async_allowed = driver_allows_async_probing(drv);
505  
506  	if (async_allowed)
507  		data->have_async = true;
508  
509  	if (data->check_async && async_allowed != data->want_async)
510  		return 0;
511  
512  	return driver_probe_device(drv, dev);
513  }
514  
static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
{
	struct device *dev = _dev;
	/* Async pass: only drivers that allow async probing are considered. */
	struct device_attach_data data = {
		.dev		= dev,
		.check_async	= true,
		.want_async	= true,
	};

	device_lock(dev);

	/* Keep the parent runtime-resumed for the duration of the attempt. */
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
	dev_dbg(dev, "async probe completed\n");

	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);

	device_unlock(dev);

	/* Drops the reference taken in __device_attach() when scheduling us. */
	put_device(dev);
}
541  
static int __device_attach(struct device *dev, bool allow_async)
{
	int ret = 0;

	device_lock(dev);
	if (dev->driver) {
		/* A driver is already set; just (re)bind if not attached yet. */
		if (klist_node_attached(&dev->p->knode_driver)) {
			ret = 1;
			goto out_unlock;
		}
		ret = device_bind_driver(dev);
		if (ret == 0)
			ret = 1;
		else {
			/* Binding failed; forget the driver and report unbound. */
			dev->driver = NULL;
			ret = 0;
		}
	} else {
		/* First, synchronous pass over the drivers on the bus. */
		struct device_attach_data data = {
			.dev = dev,
			.check_async = allow_async,
			.want_async = false,
		};

		if (dev->parent)
			pm_runtime_get_sync(dev->parent);

		ret = bus_for_each_drv(dev->bus, NULL, &data,
					__device_attach_driver);
		if (!ret && allow_async && data.have_async) {
			/*
			 * If we could not find appropriate driver
			 * synchronously and we are allowed to do
			 * async probes and there are drivers that
			 * want to probe asynchronously, we'll
			 * try them.
			 */
			dev_dbg(dev, "scheduling asynchronous probe\n");
			/* Balanced by put_device() in the async helper. */
			get_device(dev);
			async_schedule(__device_attach_async_helper, dev);
		} else {
			pm_request_idle(dev);
		}

		if (dev->parent)
			pm_runtime_put(dev->parent);
	}
out_unlock:
	device_unlock(dev);
	return ret;
}
593  
594  /**
595   * device_attach - try to attach device to a driver.
596   * @dev: device.
597   *
598   * Walk the list of drivers that the bus has and call
599   * driver_probe_device() for each pair. If a compatible
600   * pair is found, break out and return.
601   *
602   * Returns 1 if the device was bound to a driver;
603   * 0 if no matching driver was found;
604   * -ENODEV if the device is not registered.
605   *
606   * When called for a USB interface, @dev->parent lock must be held.
607   */
int device_attach(struct device *dev)
{
	/* Synchronous-only attach; async probing is reserved for initial binds. */
	return __device_attach(dev, false);
}
EXPORT_SYMBOL_GPL(device_attach);
613  
/* First-time bind after registration: asynchronous probing is allowed. */
void device_initial_probe(struct device *dev)
{
	__device_attach(dev, true);
}
618  
__driver_attach(struct device * dev,void * data)619  static int __driver_attach(struct device *dev, void *data)
620  {
621  	struct device_driver *drv = data;
622  
623  	/*
624  	 * Lock device and try to bind to it. We drop the error
625  	 * here and always return 0, because we need to keep trying
626  	 * to bind to devices and some drivers will return an error
627  	 * simply if it didn't support the device.
628  	 *
629  	 * driver_probe_device() will spit a warning if there
630  	 * is an error.
631  	 */
632  
633  	if (!driver_match_device(drv, dev))
634  		return 0;
635  
636  	if (dev->parent)	/* Needed for USB */
637  		device_lock(dev->parent);
638  	device_lock(dev);
639  	if (!dev->driver)
640  		driver_probe_device(drv, dev);
641  	device_unlock(dev);
642  	if (dev->parent)
643  		device_unlock(dev->parent);
644  
645  	return 0;
646  }
647  
648  /**
649   * driver_attach - try to bind driver to devices.
650   * @drv: driver.
651   *
652   * Walk the list of devices that the bus has on it and try to
653   * match the driver with each one.  If driver_probe_device()
654   * returns 0 and the @dev->driver is set, we've found a
655   * compatible pair.
656   */
int driver_attach(struct device_driver *drv)
{
	/* Run __driver_attach() against every device on the driver's bus. */
	return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
}
EXPORT_SYMBOL_GPL(driver_attach);
662  
663  /*
664   * __device_release_driver() must be called with @dev lock held.
665   * When called for a USB interface, @dev->parent lock must be held as well.
666   */
static void __device_release_driver(struct device *dev)
{
	struct device_driver *drv;

	drv = dev->driver;
	if (drv) {
		/* Make sure no async probe for this driver is still running. */
		if (driver_allows_async_probing(drv))
			async_synchronize_full();

		/* Resume the device so the remove callbacks see it active. */
		pm_runtime_get_sync(dev);

		driver_sysfs_remove(dev);

		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBIND_DRIVER,
						     dev);

		pm_runtime_put_sync(dev);

		/* The bus' remove hook, when present, takes precedence
		 * (mirrors the probe precedence in really_probe()). */
		if (dev->bus && dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);
		/* Release managed resources and detach the driver state. */
		devres_release_all(dev);
		dev->driver = NULL;
		dev_set_drvdata(dev, NULL);
		if (dev->pm_domain && dev->pm_domain->dismiss)
			dev->pm_domain->dismiss(dev);

		klist_remove(&dev->p->knode_driver);
		device_pm_check_callbacks(dev);
		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBOUND_DRIVER,
						     dev);

	}
}
706  
707  /**
708   * device_release_driver - manually detach device from driver.
709   * @dev: device.
710   *
711   * Manually detach device from driver.
712   * When called for a USB interface, @dev->parent lock must be held.
713   */
void device_release_driver(struct device *dev)
{
	/*
	 * If anyone calls device_release_driver() recursively from
	 * within their ->remove callback for the same device, they
	 * will deadlock right here.
	 */
	device_lock(dev);
	__device_release_driver(dev);
	device_unlock(dev);
}
EXPORT_SYMBOL_GPL(device_release_driver);
726  
727  /**
728   * driver_detach - detach driver from all devices it controls.
729   * @drv: driver.
730   */
void driver_detach(struct device_driver *drv)
{
	struct device_private *dev_prv;
	struct device *dev;

	/* Pop devices off the driver's klist until it is empty. */
	for (;;) {
		spin_lock(&drv->p->klist_devices.k_lock);
		if (list_empty(&drv->p->klist_devices.k_list)) {
			spin_unlock(&drv->p->klist_devices.k_lock);
			break;
		}
		/* Take the tail entry, so devices unbind in reverse bind order. */
		dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
				     struct device_private,
				     knode_driver.n_node);
		dev = dev_prv->device;
		/* Hold a reference across the unlocked unbind below. */
		get_device(dev);
		spin_unlock(&drv->p->klist_devices.k_lock);

		if (dev->parent)	/* Needed for USB */
			device_lock(dev->parent);
		device_lock(dev);
		/* Re-check under the device lock: it may have been rebound. */
		if (dev->driver == drv)
			__device_release_driver(dev);
		device_unlock(dev);
		if (dev->parent)
			device_unlock(dev->parent);
		put_device(dev);
	}
}
760