1 /*
2 * drivers/base/core.c - core driver model code (device registration, etc)
3 *
4 * Copyright (c) 2002-3 Patrick Mochel
5 * Copyright (c) 2002-3 Open Source Development Labs
6 * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
7 * Copyright (c) 2006 Novell, Inc.
8 *
9 * This file is released under the GPLv2
10 *
11 */
12
13 #include <linux/cpufreq.h>
14 #include <linux/device.h>
15 #include <linux/err.h>
16 #include <linux/fwnode.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/string.h>
21 #include <linux/kdev_t.h>
22 #include <linux/notifier.h>
23 #include <linux/of.h>
24 #include <linux/of_device.h>
25 #include <linux/genhd.h>
26 #include <linux/kallsyms.h>
27 #include <linux/mutex.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/netdevice.h>
30 #include <linux/sched/signal.h>
31 #include <linux/sysfs.h>
32
33 #include "base.h"
34 #include "power/power.h"
35
36 #ifdef CONFIG_SYSFS_DEPRECATED
37 #ifdef CONFIG_SYSFS_DEPRECATED_V2
38 long sysfs_deprecated = 1;
39 #else
40 long sysfs_deprecated = 0;
41 #endif
42 static int __init sysfs_deprecated_setup(char *arg)
43 {
44 return kstrtol(arg, 10, &sysfs_deprecated);
45 }
46 early_param("sysfs.deprecated", sysfs_deprecated_setup);
47 #endif
48
49 /* Device links support. */
50
51 #ifdef CONFIG_SRCU
52 static DEFINE_MUTEX(device_links_lock);
53 DEFINE_STATIC_SRCU(device_links_srcu);
54
55 static inline void device_links_write_lock(void)
56 {
57 mutex_lock(&device_links_lock);
58 }
59
60 static inline void device_links_write_unlock(void)
61 {
62 mutex_unlock(&device_links_lock);
63 }
64
65 int device_links_read_lock(void)
66 {
67 return srcu_read_lock(&device_links_srcu);
68 }
69
70 void device_links_read_unlock(int idx)
71 {
72 srcu_read_unlock(&device_links_srcu, idx);
73 }
74 #else /* !CONFIG_SRCU */
75 static DECLARE_RWSEM(device_links_lock);
76
77 static inline void device_links_write_lock(void)
78 {
79 down_write(&device_links_lock);
80 }
81
82 static inline void device_links_write_unlock(void)
83 {
84 up_write(&device_links_lock);
85 }
86
87 int device_links_read_lock(void)
88 {
89 down_read(&device_links_lock);
90 return 0;
91 }
92
93 void device_links_read_unlock(int not_used)
94 {
95 up_read(&device_links_lock);
96 }
97 #endif /* !CONFIG_SRCU */
98
99 /**
100 * device_is_dependent - Check if one device depends on another one
101 * @dev: Device to check dependencies for.
102 * @target: Device to check against.
103 *
104 * Check if @target depends on @dev or any device dependent on it (its child or
105 * its consumer etc). Return 1 if that is the case or 0 otherwise.
106 */
107 static int device_is_dependent(struct device *dev, void *target)
108 {
109 struct device_link *link;
110 int ret;
111
112 if (WARN_ON(dev == target))
113 return 1;
114
115 ret = device_for_each_child(dev, target, device_is_dependent);
116 if (ret)
117 return ret;
118
119 list_for_each_entry(link, &dev->links.consumers, s_node) {
120 if (WARN_ON(link->consumer == target))
121 return 1;
122
123 ret = device_is_dependent(link->consumer, target);
124 if (ret)
125 break;
126 }
127 return ret;
128 }
129
130 static int device_reorder_to_tail(struct device *dev, void *not_used)
131 {
132 struct device_link *link;
133
134 /*
135 * Devices that have not been registered yet will be put to the ends
136 * of the lists during the registration, so skip them here.
137 */
138 if (device_is_registered(dev))
139 devices_kset_move_last(dev);
140
141 if (device_pm_initialized(dev))
142 device_pm_move_last(dev);
143
144 device_for_each_child(dev, NULL, device_reorder_to_tail);
145 list_for_each_entry(link, &dev->links.consumers, s_node)
146 device_reorder_to_tail(link->consumer, NULL);
147
148 return 0;
149 }
150
151 /**
152 * device_link_add - Create a link between two devices.
153 * @consumer: Consumer end of the link.
154 * @supplier: Supplier end of the link.
155 * @flags: Link flags.
156 *
157 * The caller is responsible for the proper synchronization of the link creation
158 * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
159 * runtime PM framework to take the link into account. Second, if the
160 * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
161 * be forced into the active metastate and reference-counted upon the creation
162 * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
163 * ignored.
164 *
165 * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
166 * when the consumer device driver unbinds from it. The combination of both
167 * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
168 * to be returned.
169 *
170 * A side effect of the link creation is re-ordering of dpm_list and the
171 * devices_kset list by moving the consumer device and all devices depending
172 * on it to the ends of these lists (that does not happen to devices that have
173 * not been registered when this function is called).
174 *
175 * The supplier device is required to be registered when this function is called
176 * and NULL will be returned if that is not the case. The consumer device need
177 * not be registered, however.
178 */
179 struct device_link *device_link_add(struct device *consumer,
180 struct device *supplier, u32 flags)
181 {
182 struct device_link *link;
183 bool rpm_put_supplier = false;
184
185 if (!consumer || !supplier ||
186 ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
187 return NULL;
188
189 if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
190 if (pm_runtime_get_sync(supplier) < 0) {
191 pm_runtime_put_noidle(supplier);
192 return NULL;
193 }
194 rpm_put_supplier = true;
195 }
196
197 device_links_write_lock();
198 device_pm_lock();
199
200 /*
201 * If the supplier has not been fully registered yet or there is a
202 * reverse dependency between the consumer and the supplier already in
203 * the graph, return NULL.
204 */
205 if (!device_pm_initialized(supplier)
206 || device_is_dependent(consumer, supplier)) {
207 link = NULL;
208 goto out;
209 }
210
211 list_for_each_entry(link, &supplier->links.consumers, s_node)
212 if (link->consumer == consumer)
213 goto out;
214
215 link = kzalloc(sizeof(*link), GFP_KERNEL);
216 if (!link)
217 goto out;
218
219 if (flags & DL_FLAG_PM_RUNTIME) {
220 if (flags & DL_FLAG_RPM_ACTIVE) {
221 link->rpm_active = true;
222 rpm_put_supplier = false;
223 }
224 pm_runtime_new_link(consumer);
225 /*
226 * If the link is being added by the consumer driver at probe
227 * time, balance the decrementation of the supplier's runtime PM
228 * usage counter after consumer probe in driver_probe_device().
229 */
230 if (consumer->links.status == DL_DEV_PROBING)
231 pm_runtime_get_noresume(supplier);
232 }
233 get_device(supplier);
234 link->supplier = supplier;
235 INIT_LIST_HEAD(&link->s_node);
236 get_device(consumer);
237 link->consumer = consumer;
238 INIT_LIST_HEAD(&link->c_node);
239 link->flags = flags;
240
241 /* Determine the initial link state. */
242 if (flags & DL_FLAG_STATELESS) {
243 link->status = DL_STATE_NONE;
244 } else {
245 switch (supplier->links.status) {
246 case DL_DEV_DRIVER_BOUND:
247 switch (consumer->links.status) {
248 case DL_DEV_PROBING:
249 /*
250 * Some callers expect the link creation during
251 * consumer driver probe to resume the supplier
252 * even without DL_FLAG_RPM_ACTIVE.
253 */
254 if (flags & DL_FLAG_PM_RUNTIME)
255 pm_runtime_resume(supplier);
256
257 link->status = DL_STATE_CONSUMER_PROBE;
258 break;
259 case DL_DEV_DRIVER_BOUND:
260 link->status = DL_STATE_ACTIVE;
261 break;
262 default:
263 link->status = DL_STATE_AVAILABLE;
264 break;
265 }
266 break;
267 case DL_DEV_UNBINDING:
268 link->status = DL_STATE_SUPPLIER_UNBIND;
269 break;
270 default:
271 link->status = DL_STATE_DORMANT;
272 break;
273 }
274 }
275
276 /*
277 * Move the consumer and all of the devices depending on it to the end
278 * of dpm_list and the devices_kset list.
279 *
280 * It is necessary to hold dpm_list locked throughout all that or else
281 * we may end up suspending with a wrong ordering of it.
282 */
283 device_reorder_to_tail(consumer, NULL);
284
285 list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
286 list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
287
288 dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
289
290 out:
291 device_pm_unlock();
292 device_links_write_unlock();
293
294 if (rpm_put_supplier)
295 pm_runtime_put(supplier);
296
297 return link;
298 }
299 EXPORT_SYMBOL_GPL(device_link_add);
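/*
 * Illustrative sketch (not part of this file; device pointers below are
 * hypothetical): a consumer driver that depends on a supplier device for
 * runtime PM might create a managed link at probe time roughly like this:
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(consumer_dev, supplier_dev,
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
 *	if (!link)
 *		return -ENODEV;
 *
 * With DL_FLAG_AUTOREMOVE set, the link is dropped automatically when the
 * consumer driver unbinds, so no matching device_link_del() call is needed
 * in the remove path.
 */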
300
301 static void device_link_free(struct device_link *link)
302 {
303 put_device(link->consumer);
304 put_device(link->supplier);
305 kfree(link);
306 }
307
308 #ifdef CONFIG_SRCU
309 static void __device_link_free_srcu(struct rcu_head *rhead)
310 {
311 device_link_free(container_of(rhead, struct device_link, rcu_head));
312 }
313
314 static void __device_link_del(struct device_link *link)
315 {
316 dev_info(link->consumer, "Dropping the link to %s\n",
317 dev_name(link->supplier));
318
319 if (link->flags & DL_FLAG_PM_RUNTIME)
320 pm_runtime_drop_link(link->consumer);
321
322 list_del_rcu(&link->s_node);
323 list_del_rcu(&link->c_node);
324 call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
325 }
326 #else /* !CONFIG_SRCU */
327 static void __device_link_del(struct device_link *link)
328 {
329 dev_info(link->consumer, "Dropping the link to %s\n",
330 dev_name(link->supplier));
331
332 if (link->flags & DL_FLAG_PM_RUNTIME)
333 pm_runtime_drop_link(link->consumer);
334
335 list_del(&link->s_node);
336 list_del(&link->c_node);
337 device_link_free(link);
338 }
339 #endif /* !CONFIG_SRCU */
340
341 /**
342 * device_link_del - Delete a link between two devices.
343 * @link: Device link to delete.
344 *
345 * The caller must ensure proper synchronization of this function with runtime
346 * PM.
347 */
348 void device_link_del(struct device_link *link)
349 {
350 device_links_write_lock();
351 device_pm_lock();
352 __device_link_del(link);
353 device_pm_unlock();
354 device_links_write_unlock();
355 }
356 EXPORT_SYMBOL_GPL(device_link_del);
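/*
 * Illustrative sketch (hypothetical driver code): a driver that created a
 * DL_FLAG_STATELESS link itself is expected to drop it explicitly, for
 * example from its remove path:
 *
 *	if (priv->link)
 *		device_link_del(priv->link);
 *
 * where priv->link holds the pointer returned by an earlier
 * device_link_add() call.
 */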
357
358 static void device_links_missing_supplier(struct device *dev)
359 {
360 struct device_link *link;
361
362 list_for_each_entry(link, &dev->links.suppliers, c_node)
363 if (link->status == DL_STATE_CONSUMER_PROBE)
364 WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
365 }
366
367 /**
368 * device_links_check_suppliers - Check presence of supplier drivers.
369 * @dev: Consumer device.
370 *
371 * Check links from this device to any suppliers. Walk the list of the device's
372 * links to suppliers and see if all of them are available. If not, simply
373 * return -EPROBE_DEFER.
374 *
375 * We need to guarantee that the supplier will not go away after the check has
376 * been positive here. It only can go away in __device_release_driver() and
377 * that function checks the device's links to consumers. This means we need to
378 * mark the link as "consumer probe in progress" to make the supplier removal
379 * wait for us to complete (or bad things may happen).
380 *
381 * Links with the DL_FLAG_STATELESS flag set are ignored.
382 */
383 int device_links_check_suppliers(struct device *dev)
384 {
385 struct device_link *link;
386 int ret = 0;
387
388 device_links_write_lock();
389
390 list_for_each_entry(link, &dev->links.suppliers, c_node) {
391 if (link->flags & DL_FLAG_STATELESS)
392 continue;
393
394 if (link->status != DL_STATE_AVAILABLE) {
395 device_links_missing_supplier(dev);
396 ret = -EPROBE_DEFER;
397 break;
398 }
399 WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
400 }
401 dev->links.status = DL_DEV_PROBING;
402
403 device_links_write_unlock();
404 return ret;
405 }
406
407 /**
408 * device_links_driver_bound - Update device links after probing its driver.
409 * @dev: Device to update the links for.
410 *
411 * The probe has been successful, so update links from this device to any
412 * consumers by changing their status to "available".
413 *
414 * Also change the status of @dev's links to suppliers to "active".
415 *
416 * Links with the DL_FLAG_STATELESS flag set are ignored.
417 */
418 void device_links_driver_bound(struct device *dev)
419 {
420 struct device_link *link;
421
422 device_links_write_lock();
423
424 list_for_each_entry(link, &dev->links.consumers, s_node) {
425 if (link->flags & DL_FLAG_STATELESS)
426 continue;
427
428 WARN_ON(link->status != DL_STATE_DORMANT);
429 WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
430 }
431
432 list_for_each_entry(link, &dev->links.suppliers, c_node) {
433 if (link->flags & DL_FLAG_STATELESS)
434 continue;
435
436 WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
437 WRITE_ONCE(link->status, DL_STATE_ACTIVE);
438 }
439
440 dev->links.status = DL_DEV_DRIVER_BOUND;
441
442 device_links_write_unlock();
443 }
444
445 /**
446 * __device_links_no_driver - Update links of a device without a driver.
447 * @dev: Device without a driver.
448 *
449 * Delete all non-persistent links from this device to any suppliers.
450 *
451 * Persistent links stay around, but their status is changed to "available",
452 * unless they already are in the "supplier unbind in progress" state in which
453 * case they need not be updated.
454 *
455 * Links with the DL_FLAG_STATELESS flag set are ignored.
456 */
457 static void __device_links_no_driver(struct device *dev)
458 {
459 struct device_link *link, *ln;
460
461 list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
462 if (link->flags & DL_FLAG_STATELESS)
463 continue;
464
465 if (link->flags & DL_FLAG_AUTOREMOVE)
466 __device_link_del(link);
467 else if (link->status != DL_STATE_SUPPLIER_UNBIND)
468 WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
469 }
470
471 dev->links.status = DL_DEV_NO_DRIVER;
472 }
473
474 void device_links_no_driver(struct device *dev)
475 {
476 device_links_write_lock();
477 __device_links_no_driver(dev);
478 device_links_write_unlock();
479 }
480
481 /**
482 * device_links_driver_cleanup - Update links after driver removal.
483 * @dev: Device whose driver has just gone away.
484 *
485 * Update links to consumers for @dev by changing their status to "dormant" and
486 * invoke %__device_links_no_driver() to update links to suppliers for it as
487 * appropriate.
488 *
489 * Links with the DL_FLAG_STATELESS flag set are ignored.
490 */
491 void device_links_driver_cleanup(struct device *dev)
492 {
493 struct device_link *link;
494
495 device_links_write_lock();
496
497 list_for_each_entry(link, &dev->links.consumers, s_node) {
498 if (link->flags & DL_FLAG_STATELESS)
499 continue;
500
501 WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
502 WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
503 WRITE_ONCE(link->status, DL_STATE_DORMANT);
504 }
505
506 __device_links_no_driver(dev);
507
508 device_links_write_unlock();
509 }
510
511 /**
512 * device_links_busy - Check if there are any busy links to consumers.
513 * @dev: Device to check.
514 *
515 * Check each consumer of the device and return 'true' if its link's status
516 * is one of "consumer probe" or "active" (meaning that the given consumer is
517 * probing right now or its driver is present). Otherwise, change the link
518 * state to "supplier unbind" to prevent the consumer from being probed
519 * successfully going forward.
520 *
521 * Return 'false' if there are no probing or active consumers.
522 *
523 * Links with the DL_FLAG_STATELESS flag set are ignored.
524 */
525 bool device_links_busy(struct device *dev)
526 {
527 struct device_link *link;
528 bool ret = false;
529
530 device_links_write_lock();
531
532 list_for_each_entry(link, &dev->links.consumers, s_node) {
533 if (link->flags & DL_FLAG_STATELESS)
534 continue;
535
536 if (link->status == DL_STATE_CONSUMER_PROBE
537 || link->status == DL_STATE_ACTIVE) {
538 ret = true;
539 break;
540 }
541 WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
542 }
543
544 dev->links.status = DL_DEV_UNBINDING;
545
546 device_links_write_unlock();
547 return ret;
548 }
549
550 /**
551 * device_links_unbind_consumers - Force unbind consumers of the given device.
552 * @dev: Device to unbind the consumers of.
553 *
554 * Walk the list of links to consumers for @dev and if any of them is in the
555 * "consumer probe" state, wait for all device probes in progress to complete
556 * and start over.
557 *
558 * If that's not the case, change the status of the link to "supplier unbind"
559 * and check if the link was in the "active" state. If so, force the consumer
560 * driver to unbind and start over (the consumer will not re-probe as we have
561 * changed the state of the link already).
562 *
563 * Links with the DL_FLAG_STATELESS flag set are ignored.
564 */
565 void device_links_unbind_consumers(struct device *dev)
566 {
567 struct device_link *link;
568
569 start:
570 device_links_write_lock();
571
572 list_for_each_entry(link, &dev->links.consumers, s_node) {
573 enum device_link_state status;
574
575 if (link->flags & DL_FLAG_STATELESS)
576 continue;
577
578 status = link->status;
579 if (status == DL_STATE_CONSUMER_PROBE) {
580 device_links_write_unlock();
581
582 wait_for_device_probe();
583 goto start;
584 }
585 WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
586 if (status == DL_STATE_ACTIVE) {
587 struct device *consumer = link->consumer;
588
589 get_device(consumer);
590
591 device_links_write_unlock();
592
593 device_release_driver_internal(consumer, NULL,
594 consumer->parent);
595 put_device(consumer);
596 goto start;
597 }
598 }
599
600 device_links_write_unlock();
601 }
602
603 /**
604 * device_links_purge - Delete existing links to other devices.
605 * @dev: Target device.
606 */
607 static void device_links_purge(struct device *dev)
608 {
609 struct device_link *link, *ln;
610
611 /*
612 * Delete all of the remaining links from this device to any other
613 * devices (either consumers or suppliers).
614 */
615 device_links_write_lock();
616
617 list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
618 WARN_ON(link->status == DL_STATE_ACTIVE);
619 __device_link_del(link);
620 }
621
622 list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
623 WARN_ON(link->status != DL_STATE_DORMANT &&
624 link->status != DL_STATE_NONE);
625 __device_link_del(link);
626 }
627
628 device_links_write_unlock();
629 }
630
631 /* Device links support end. */
632
633 int (*platform_notify)(struct device *dev) = NULL;
634 int (*platform_notify_remove)(struct device *dev) = NULL;
635 static struct kobject *dev_kobj;
636 struct kobject *sysfs_dev_char_kobj;
637 struct kobject *sysfs_dev_block_kobj;
638
639 static DEFINE_MUTEX(device_hotplug_lock);
640
641 void lock_device_hotplug(void)
642 {
643 mutex_lock(&device_hotplug_lock);
644 }
645
646 void unlock_device_hotplug(void)
647 {
648 mutex_unlock(&device_hotplug_lock);
649 }
650
651 int lock_device_hotplug_sysfs(void)
652 {
653 if (mutex_trylock(&device_hotplug_lock))
654 return 0;
655
656 /* Avoid busy looping (5 ms of sleep should do). */
657 msleep(5);
658 return restart_syscall();
659 }
660
661 #ifdef CONFIG_BLOCK
662 static inline int device_is_not_partition(struct device *dev)
663 {
664 return !(dev->type == &part_type);
665 }
666 #else
667 static inline int device_is_not_partition(struct device *dev)
668 {
669 return 1;
670 }
671 #endif
672
673 /**
674 * dev_driver_string - Return a device's driver name, if at all possible
675 * @dev: struct device to get the name of
676 *
677 * Will return the device's driver's name if it is bound to a device. If
678 * the device is not bound to a driver, it will return the name of the bus
679 * it is attached to. If it is not attached to a bus either, an empty
680 * string will be returned.
681 */
682 const char *dev_driver_string(const struct device *dev)
683 {
684 struct device_driver *drv;
685
686 /* dev->driver can change to NULL underneath us because of unbinding,
687 * so be careful about accessing it. dev->bus and dev->class should
688 * never change once they are set, so they don't need special care.
689 */
690 drv = ACCESS_ONCE(dev->driver);
691 return drv ? drv->name :
692 (dev->bus ? dev->bus->name :
693 (dev->class ? dev->class->name : ""));
694 }
695 EXPORT_SYMBOL(dev_driver_string);
696
697 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
698
699 static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
700 char *buf)
701 {
702 struct device_attribute *dev_attr = to_dev_attr(attr);
703 struct device *dev = kobj_to_dev(kobj);
704 ssize_t ret = -EIO;
705
706 if (dev_attr->show)
707 ret = dev_attr->show(dev, dev_attr, buf);
708 if (ret >= (ssize_t)PAGE_SIZE) {
709 print_symbol("dev_attr_show: %s returned bad count\n",
710 (unsigned long)dev_attr->show);
711 }
712 return ret;
713 }
714
715 static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
716 const char *buf, size_t count)
717 {
718 struct device_attribute *dev_attr = to_dev_attr(attr);
719 struct device *dev = kobj_to_dev(kobj);
720 ssize_t ret = -EIO;
721
722 if (dev_attr->store)
723 ret = dev_attr->store(dev, dev_attr, buf, count);
724 return ret;
725 }
726
727 static const struct sysfs_ops dev_sysfs_ops = {
728 .show = dev_attr_show,
729 .store = dev_attr_store,
730 };
731
732 #define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
733
734 ssize_t device_store_ulong(struct device *dev,
735 struct device_attribute *attr,
736 const char *buf, size_t size)
737 {
738 struct dev_ext_attribute *ea = to_ext_attr(attr);
739 char *end;
740 unsigned long new = simple_strtoul(buf, &end, 0);
741 if (end == buf)
742 return -EINVAL;
743 *(unsigned long *)(ea->var) = new;
744 /* Always return full write size even if we didn't consume all */
745 return size;
746 }
747 EXPORT_SYMBOL_GPL(device_store_ulong);
748
749 ssize_t device_show_ulong(struct device *dev,
750 struct device_attribute *attr,
751 char *buf)
752 {
753 struct dev_ext_attribute *ea = to_ext_attr(attr);
754 return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
755 }
756 EXPORT_SYMBOL_GPL(device_show_ulong);
757
758 ssize_t device_store_int(struct device *dev,
759 struct device_attribute *attr,
760 const char *buf, size_t size)
761 {
762 struct dev_ext_attribute *ea = to_ext_attr(attr);
763 char *end;
764 long new = simple_strtol(buf, &end, 0);
765 if (end == buf || new > INT_MAX || new < INT_MIN)
766 return -EINVAL;
767 *(int *)(ea->var) = new;
768 /* Always return full write size even if we didn't consume all */
769 return size;
770 }
771 EXPORT_SYMBOL_GPL(device_store_int);
772
773 ssize_t device_show_int(struct device *dev,
774 struct device_attribute *attr,
775 char *buf)
776 {
777 struct dev_ext_attribute *ea = to_ext_attr(attr);
778
779 return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
780 }
781 EXPORT_SYMBOL_GPL(device_show_int);
782
783 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
784 const char *buf, size_t size)
785 {
786 struct dev_ext_attribute *ea = to_ext_attr(attr);
787
788 if (strtobool(buf, ea->var) < 0)
789 return -EINVAL;
790
791 return size;
792 }
793 EXPORT_SYMBOL_GPL(device_store_bool);
794
795 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
796 char *buf)
797 {
798 struct dev_ext_attribute *ea = to_ext_attr(attr);
799
800 return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
801 }
802 EXPORT_SYMBOL_GPL(device_show_bool);
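/*
 * Illustrative sketch (assumed driver-side usage, not part of this file):
 * the helpers above back the DEVICE_ULONG_ATTR()/DEVICE_INT_ATTR()/
 * DEVICE_BOOL_ATTR() macros from <linux/device.h>, which wrap a variable
 * in a struct dev_ext_attribute, e.g.:
 *
 *	static unsigned long poll_interval = 100;
 *	static DEVICE_ULONG_ATTR(poll_interval, 0644, poll_interval);
 *
 *	ret = device_create_file(dev, &dev_attr_poll_interval.attr);
 *
 * Reads then go through device_show_ulong() and writes through
 * device_store_ulong() on the wrapped variable; the names above are
 * placeholders.
 */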
803
804 /**
805 * device_release - free device structure.
806 * @kobj: device's kobject.
807 *
808 * This is called once the reference count for the object
809 * reaches 0. We forward the call to the device's release
810 * method, which should handle actually freeing the structure.
811 */
812 static void device_release(struct kobject *kobj)
813 {
814 struct device *dev = kobj_to_dev(kobj);
815 struct device_private *p = dev->p;
816
817 /*
818 * Some platform devices are driven without driver attached
819 * and managed resources may have been acquired. Make sure
820 * all resources are released.
821 *
822 * Drivers still can add resources into device after device
823 * is deleted but alive, so release devres here to avoid
824 * possible memory leak.
825 */
826 devres_release_all(dev);
827
828 if (dev->release)
829 dev->release(dev);
830 else if (dev->type && dev->type->release)
831 dev->type->release(dev);
832 else if (dev->class && dev->class->dev_release)
833 dev->class->dev_release(dev);
834 else
835 WARN(1, KERN_ERR "Device '%s' does not have a release() "
836 "function, it is broken and must be fixed.\n",
837 dev_name(dev));
838 kfree(p);
839 }
840
841 static const void *device_namespace(struct kobject *kobj)
842 {
843 struct device *dev = kobj_to_dev(kobj);
844 const void *ns = NULL;
845
846 if (dev->class && dev->class->ns_type)
847 ns = dev->class->namespace(dev);
848
849 return ns;
850 }
851
852 static struct kobj_type device_ktype = {
853 .release = device_release,
854 .sysfs_ops = &dev_sysfs_ops,
855 .namespace = device_namespace,
856 };
857
858
859 static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
860 {
861 struct kobj_type *ktype = get_ktype(kobj);
862
863 if (ktype == &device_ktype) {
864 struct device *dev = kobj_to_dev(kobj);
865 if (dev->bus)
866 return 1;
867 if (dev->class)
868 return 1;
869 }
870 return 0;
871 }
872
873 static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
874 {
875 struct device *dev = kobj_to_dev(kobj);
876
877 if (dev->bus)
878 return dev->bus->name;
879 if (dev->class)
880 return dev->class->name;
881 return NULL;
882 }
883
884 static int dev_uevent(struct kset *kset, struct kobject *kobj,
885 struct kobj_uevent_env *env)
886 {
887 struct device *dev = kobj_to_dev(kobj);
888 int retval = 0;
889
890 /* add device node properties if present */
891 if (MAJOR(dev->devt)) {
892 const char *tmp;
893 const char *name;
894 umode_t mode = 0;
895 kuid_t uid = GLOBAL_ROOT_UID;
896 kgid_t gid = GLOBAL_ROOT_GID;
897
898 add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
899 add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
900 name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
901 if (name) {
902 add_uevent_var(env, "DEVNAME=%s", name);
903 if (mode)
904 add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
905 if (!uid_eq(uid, GLOBAL_ROOT_UID))
906 add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
907 if (!gid_eq(gid, GLOBAL_ROOT_GID))
908 add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
909 kfree(tmp);
910 }
911 }
912
913 if (dev->type && dev->type->name)
914 add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
915
916 if (dev->driver)
917 add_uevent_var(env, "DRIVER=%s", dev->driver->name);
918
919 /* Add common DT information about the device */
920 of_device_uevent(dev, env);
921
922 /* have the bus specific function add its stuff */
923 if (dev->bus && dev->bus->uevent) {
924 retval = dev->bus->uevent(dev, env);
925 if (retval)
926 pr_debug("device: '%s': %s: bus uevent() returned %d\n",
927 dev_name(dev), __func__, retval);
928 }
929
930 /* have the class specific function add its stuff */
931 if (dev->class && dev->class->dev_uevent) {
932 retval = dev->class->dev_uevent(dev, env);
933 if (retval)
934 pr_debug("device: '%s': %s: class uevent() "
935 "returned %d\n", dev_name(dev),
936 __func__, retval);
937 }
938
939 /* have the device type specific function add its stuff */
940 if (dev->type && dev->type->uevent) {
941 retval = dev->type->uevent(dev, env);
942 if (retval)
943 pr_debug("device: '%s': %s: dev_type uevent() "
944 "returned %d\n", dev_name(dev),
945 __func__, retval);
946 }
947
948 return retval;
949 }
950
951 static const struct kset_uevent_ops device_uevent_ops = {
952 .filter = dev_uevent_filter,
953 .name = dev_uevent_name,
954 .uevent = dev_uevent,
955 };
956
957 static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
958 char *buf)
959 {
960 struct kobject *top_kobj;
961 struct kset *kset;
962 struct kobj_uevent_env *env = NULL;
963 int i;
964 size_t count = 0;
965 int retval;
966
967 /* search the kset that the device belongs to */
968 top_kobj = &dev->kobj;
969 while (!top_kobj->kset && top_kobj->parent)
970 top_kobj = top_kobj->parent;
971 if (!top_kobj->kset)
972 goto out;
973
974 kset = top_kobj->kset;
975 if (!kset->uevent_ops || !kset->uevent_ops->uevent)
976 goto out;
977
978 /* respect filter */
979 if (kset->uevent_ops && kset->uevent_ops->filter)
980 if (!kset->uevent_ops->filter(kset, &dev->kobj))
981 goto out;
982
983 env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
984 if (!env)
985 return -ENOMEM;
986
987 /* let the kset specific function add its keys */
988 retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
989 if (retval)
990 goto out;
991
992 /* copy keys to file */
993 for (i = 0; i < env->envp_idx; i++)
994 count += sprintf(&buf[count], "%s\n", env->envp[i]);
995 out:
996 kfree(env);
997 return count;
998 }
999
1000 static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
1001 const char *buf, size_t count)
1002 {
1003 int rc;
1004
1005 rc = kobject_synth_uevent(&dev->kobj, buf, count);
1006
1007 if (rc) {
1008 dev_err(dev, "uevent: failed to send synthetic uevent\n");
1009 return rc;
1010 }
1011
1012 return count;
1013 }
1014 static DEVICE_ATTR_RW(uevent);
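/*
 * Note (user-space view, not part of this file): writing an action such as
 * "add" or "change" to this "uevent" attribute, e.g.
 *
 *	echo change > /sys/devices/.../uevent
 *
 * is parsed by kobject_synth_uevent() and resends the corresponding uevent
 * to user space; the path shown is a placeholder.
 */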
1015
1016 static ssize_t online_show(struct device *dev, struct device_attribute *attr,
1017 char *buf)
1018 {
1019 bool val;
1020
1021 device_lock(dev);
1022 val = !dev->offline;
1023 device_unlock(dev);
1024 return sprintf(buf, "%u\n", val);
1025 }
1026
1027 static ssize_t online_store(struct device *dev, struct device_attribute *attr,
1028 const char *buf, size_t count)
1029 {
1030 bool val;
1031 int ret;
1032
1033 ret = strtobool(buf, &val);
1034 if (ret < 0)
1035 return ret;
1036
1037 ret = lock_device_hotplug_sysfs();
1038 if (ret)
1039 return ret;
1040
1041 ret = val ? device_online(dev) : device_offline(dev);
1042 unlock_device_hotplug();
1043 return ret < 0 ? ret : count;
1044 }
1045 static DEVICE_ATTR_RW(online);
1046
1047 int device_add_groups(struct device *dev, const struct attribute_group **groups)
1048 {
1049 return sysfs_create_groups(&dev->kobj, groups);
1050 }
1051 EXPORT_SYMBOL_GPL(device_add_groups);
1052
1053 void device_remove_groups(struct device *dev,
1054 const struct attribute_group **groups)
1055 {
1056 sysfs_remove_groups(&dev->kobj, groups);
1057 }
1058 EXPORT_SYMBOL_GPL(device_remove_groups);
1059
1060 union device_attr_group_devres {
1061 const struct attribute_group *group;
1062 const struct attribute_group **groups;
1063 };
1064
1065 static int devm_attr_group_match(struct device *dev, void *res, void *data)
1066 {
1067 return ((union device_attr_group_devres *)res)->group == data;
1068 }
1069
1070 static void devm_attr_group_remove(struct device *dev, void *res)
1071 {
1072 union device_attr_group_devres *devres = res;
1073 const struct attribute_group *group = devres->group;
1074
1075 dev_dbg(dev, "%s: removing group %p\n", __func__, group);
1076 sysfs_remove_group(&dev->kobj, group);
1077 }
1078
1079 static void devm_attr_groups_remove(struct device *dev, void *res)
1080 {
1081 union device_attr_group_devres *devres = res;
1082 const struct attribute_group **groups = devres->groups;
1083
1084 dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
1085 sysfs_remove_groups(&dev->kobj, groups);
1086 }
1087
1088 /**
1089 * devm_device_add_group - given a device, create a managed attribute group
1090 * @dev: The device to create the group for
1091 * @grp: The attribute group to create
1092 *
1093 * This function creates a group for the first time. It will explicitly
1094 * warn and error if any of the attribute files being created already exist.
1095 *
1096 * Returns 0 on success or error code on failure.
1097 */
1098 int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
1099 {
1100 union device_attr_group_devres *devres;
1101 int error;
1102
1103 devres = devres_alloc(devm_attr_group_remove,
1104 sizeof(*devres), GFP_KERNEL);
1105 if (!devres)
1106 return -ENOMEM;
1107
1108 error = sysfs_create_group(&dev->kobj, grp);
1109 if (error) {
1110 devres_free(devres);
1111 return error;
1112 }
1113
1114 devres->group = grp;
1115 devres_add(dev, devres);
1116 return 0;
1117 }
1118 EXPORT_SYMBOL_GPL(devm_device_add_group);
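/*
 * Illustrative sketch (hypothetical driver code): in a probe routine the
 * managed variant removes the need for explicit cleanup, e.g.
 *
 *	static struct attribute *foo_attrs[] = {
 *		&dev_attr_bar.attr,
 *		NULL,
 *	};
 *	static const struct attribute_group foo_group = {
 *		.attrs = foo_attrs,
 *	};
 *
 *	ret = devm_device_add_group(dev, &foo_group);
 *	if (ret)
 *		return ret;
 *
 * The group is then removed automatically by devres when the device is
 * unbound; "foo" and "bar" above are placeholders.
 */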
1119
1120 /**
1121 * devm_device_remove_group: remove a managed group from a device
1122 * @dev: device to remove the group from
1123 * @grp: group to remove
1124 *
1125 * This function removes a group of attributes from a device. The attributes
1126 * previously have to have been created for this group, otherwise it will fail.
1127 */
1128 void devm_device_remove_group(struct device *dev,
1129 const struct attribute_group *grp)
1130 {
1131 WARN_ON(devres_release(dev, devm_attr_group_remove,
1132 devm_attr_group_match,
1133 /* cast away const */ (void *)grp));
1134 }
1135 EXPORT_SYMBOL_GPL(devm_device_remove_group);
1136
1137 /**
1138 * devm_device_add_groups - create a bunch of managed attribute groups
1139 * @dev: The device to create the group for
1140 * @groups: The attribute groups to create, NULL terminated
1141 *
1142 * This function creates a bunch of managed attribute groups. If an error
1143 * occurs when creating a group, all previously created groups will be
1144 * removed, unwinding everything back to the original state when this
1145 * function was called. It will explicitly warn and error if any of the
1146 * attribute files being created already exist.
1147 *
1148 * Returns 0 on success or error code from sysfs_create_group on failure.
1149 */
1150 int devm_device_add_groups(struct device *dev,
1151 const struct attribute_group **groups)
1152 {
1153 union device_attr_group_devres *devres;
1154 int error;
1155
1156 devres = devres_alloc(devm_attr_groups_remove,
1157 sizeof(*devres), GFP_KERNEL);
1158 if (!devres)
1159 return -ENOMEM;
1160
1161 error = sysfs_create_groups(&dev->kobj, groups);
1162 if (error) {
1163 devres_free(devres);
1164 return error;
1165 }
1166
1167 devres->groups = groups;
1168 devres_add(dev, devres);
1169 return 0;
1170 }
1171 EXPORT_SYMBOL_GPL(devm_device_add_groups);
1172
1173 /**
1174 * devm_device_remove_groups - remove a list of managed groups
1175 *
1176 * @dev: The device for the groups to be removed from
1177 * @groups: NULL terminated list of groups to be removed
1178 *
1179 * If groups is not NULL, remove the specified groups from the device.
1180 */
1181 void devm_device_remove_groups(struct device *dev,
1182 const struct attribute_group **groups)
1183 {
1184 WARN_ON(devres_release(dev, devm_attr_groups_remove,
1185 devm_attr_group_match,
1186 /* cast away const */ (void *)groups));
1187 }
1188 EXPORT_SYMBOL_GPL(devm_device_remove_groups);
1189
1190 static int device_add_attrs(struct device *dev)
1191 {
1192 struct class *class = dev->class;
1193 const struct device_type *type = dev->type;
1194 int error;
1195
1196 if (class) {
1197 error = device_add_groups(dev, class->dev_groups);
1198 if (error)
1199 return error;
1200 }
1201
1202 if (type) {
1203 error = device_add_groups(dev, type->groups);
1204 if (error)
1205 goto err_remove_class_groups;
1206 }
1207
1208 error = device_add_groups(dev, dev->groups);
1209 if (error)
1210 goto err_remove_type_groups;
1211
1212 if (device_supports_offline(dev) && !dev->offline_disabled) {
1213 error = device_create_file(dev, &dev_attr_online);
1214 if (error)
1215 goto err_remove_dev_groups;
1216 }
1217
1218 return 0;
1219
1220 err_remove_dev_groups:
1221 device_remove_groups(dev, dev->groups);
1222 err_remove_type_groups:
1223 if (type)
1224 device_remove_groups(dev, type->groups);
1225 err_remove_class_groups:
1226 if (class)
1227 device_remove_groups(dev, class->dev_groups);
1228
1229 return error;
1230 }
1231
1232 static void device_remove_attrs(struct device *dev)
1233 {
1234 struct class *class = dev->class;
1235 const struct device_type *type = dev->type;
1236
1237 device_remove_file(dev, &dev_attr_online);
1238 device_remove_groups(dev, dev->groups);
1239
1240 if (type)
1241 device_remove_groups(dev, type->groups);
1242
1243 if (class)
1244 device_remove_groups(dev, class->dev_groups);
1245 }
1246
1247 static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
1248 char *buf)
1249 {
1250 return print_dev_t(buf, dev->devt);
1251 }
1252 static DEVICE_ATTR_RO(dev);
1253
1254 /* /sys/devices/ */
1255 struct kset *devices_kset;
1256
1257 /**
1258 * devices_kset_move_before - Move device in the devices_kset's list.
1259 * @deva: Device to move.
1260 * @devb: Device @deva should come before.
1261 */
1262 static void devices_kset_move_before(struct device *deva, struct device *devb)
1263 {
1264 if (!devices_kset)
1265 return;
1266 pr_debug("devices_kset: Moving %s before %s\n",
1267 dev_name(deva), dev_name(devb));
1268 spin_lock(&devices_kset->list_lock);
1269 list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
1270 spin_unlock(&devices_kset->list_lock);
1271 }
1272
1273 /**
1274 * devices_kset_move_after - Move device in the devices_kset's list.
1275 * @deva: Device to move
1276 * @devb: Device @deva should come after.
1277 */
1278 static void devices_kset_move_after(struct device *deva, struct device *devb)
1279 {
1280 if (!devices_kset)
1281 return;
1282 pr_debug("devices_kset: Moving %s after %s\n",
1283 dev_name(deva), dev_name(devb));
1284 spin_lock(&devices_kset->list_lock);
1285 list_move(&deva->kobj.entry, &devb->kobj.entry);
1286 spin_unlock(&devices_kset->list_lock);
1287 }
1288
1289 /**
1290 * devices_kset_move_last - move the device to the end of devices_kset's list.
1291 * @dev: device to move
1292 */
1293 void devices_kset_move_last(struct device *dev)
1294 {
1295 if (!devices_kset)
1296 return;
1297 pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
1298 spin_lock(&devices_kset->list_lock);
1299 list_move_tail(&dev->kobj.entry, &devices_kset->list);
1300 spin_unlock(&devices_kset->list_lock);
1301 }
1302
1303 /**
1304 * device_create_file - create sysfs attribute file for device.
1305 * @dev: device.
1306 * @attr: device attribute descriptor.
1307 */
1308 int device_create_file(struct device *dev,
1309 const struct device_attribute *attr)
1310 {
1311 int error = 0;
1312
1313 if (dev) {
1314 WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
1315 "Attribute %s: write permission without 'store'\n",
1316 attr->attr.name);
1317 WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
1318 "Attribute %s: read permission without 'show'\n",
1319 attr->attr.name);
1320 error = sysfs_create_file(&dev->kobj, &attr->attr);
1321 }
1322
1323 return error;
1324 }
1325 EXPORT_SYMBOL_GPL(device_create_file);
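/*
 * Illustrative sketch (hypothetical attribute, not part of this file): a
 * typical caller pairs this with the DEVICE_ATTR_*() helpers, e.g.
 *
 *	static ssize_t foo_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", 42);
 *	}
 *	static DEVICE_ATTR_RO(foo);
 *
 *	ret = device_create_file(dev, &dev_attr_foo);
 *
 * DEVICE_ATTR_RO(foo) expands to a struct device_attribute named
 * dev_attr_foo with mode 0444 and foo_show() as the ->show() method, which
 * keeps the WARN()s above about mode/handler mismatches quiet.
 */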
1326
1327 /**
1328 * device_remove_file - remove sysfs attribute file.
1329 * @dev: device.
1330 * @attr: device attribute descriptor.
1331 */
1332 void device_remove_file(struct device *dev,
1333 const struct device_attribute *attr)
1334 {
1335 if (dev)
1336 sysfs_remove_file(&dev->kobj, &attr->attr);
1337 }
1338 EXPORT_SYMBOL_GPL(device_remove_file);
1339
1340 /**
1341 * device_remove_file_self - remove sysfs attribute file from its own method.
1342 * @dev: device.
1343 * @attr: device attribute descriptor.
1344 *
1345 * See kernfs_remove_self() for details.
1346 */
1347 bool device_remove_file_self(struct device *dev,
1348 const struct device_attribute *attr)
1349 {
1350 if (dev)
1351 return sysfs_remove_file_self(&dev->kobj, &attr->attr);
1352 else
1353 return false;
1354 }
1355 EXPORT_SYMBOL_GPL(device_remove_file_self);
1356
1357 /**
1358 * device_create_bin_file - create sysfs binary attribute file for device.
1359 * @dev: device.
1360 * @attr: device binary attribute descriptor.
1361 */
1362 int device_create_bin_file(struct device *dev,
1363 const struct bin_attribute *attr)
1364 {
1365 int error = -EINVAL;
1366 if (dev)
1367 error = sysfs_create_bin_file(&dev->kobj, attr);
1368 return error;
1369 }
1370 EXPORT_SYMBOL_GPL(device_create_bin_file);
1371
1372 /**
1373 * device_remove_bin_file - remove sysfs binary attribute file
1374 * @dev: device.
1375 * @attr: device binary attribute descriptor.
1376 */
1377 void device_remove_bin_file(struct device *dev,
1378 const struct bin_attribute *attr)
1379 {
1380 if (dev)
1381 sysfs_remove_bin_file(&dev->kobj, attr);
1382 }
1383 EXPORT_SYMBOL_GPL(device_remove_bin_file);
1384
1385 static void klist_children_get(struct klist_node *n)
1386 {
1387 struct device_private *p = to_device_private_parent(n);
1388 struct device *dev = p->device;
1389
1390 get_device(dev);
1391 }
1392
1393 static void klist_children_put(struct klist_node *n)
1394 {
1395 struct device_private *p = to_device_private_parent(n);
1396 struct device *dev = p->device;
1397
1398 put_device(dev);
1399 }
1400
1401 /**
1402 * device_initialize - init device structure.
1403 * @dev: device.
1404 *
1405 * This prepares the device for use by other layers by initializing
1406 * its fields.
1407 * It is the first half of device_register(), if called by
1408 * that function, though it can also be called separately, so one
1409 * may use @dev's fields. In particular, get_device()/put_device()
1410 * may be used for reference counting of @dev after calling this
1411 * function.
1412 *
1413 * All fields in @dev must be initialized by the caller to 0, except
1414 * for those explicitly set to some other value. The simplest
1415 * approach is to use kzalloc() to allocate the structure containing
1416 * @dev.
1417 *
1418 * NOTE: Use put_device() to give up your reference instead of freeing
1419 * @dev directly once you have called this function.
1420 */
1421 void device_initialize(struct device *dev)
1422 {
1423 dev->kobj.kset = devices_kset;
1424 kobject_init(&dev->kobj, &device_ktype);
1425 INIT_LIST_HEAD(&dev->dma_pools);
1426 mutex_init(&dev->mutex);
1427 lockdep_set_novalidate_class(&dev->mutex);
1428 spin_lock_init(&dev->devres_lock);
1429 INIT_LIST_HEAD(&dev->devres_head);
1430 device_pm_init(dev);
1431 set_dev_node(dev, -1);
1432 #ifdef CONFIG_GENERIC_MSI_IRQ
1433 INIT_LIST_HEAD(&dev->msi_list);
1434 #endif
1435 INIT_LIST_HEAD(&dev->links.consumers);
1436 INIT_LIST_HEAD(&dev->links.suppliers);
1437 dev->links.status = DL_DEV_NO_DRIVER;
1438 }
1439 EXPORT_SYMBOL_GPL(device_initialize);
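/*
 * Illustrative sketch (hypothetical subsystem code): the split form of
 * device_register() looks roughly like
 *
 *	device_initialize(&foo->dev);
 *	foo->dev.parent = parent;
 *	foo->dev.release = foo_release;
 *	dev_set_name(&foo->dev, "foo%d", id);
 *	err = device_add(&foo->dev);
 *	if (err) {
 *		put_device(&foo->dev);
 *		return err;
 *	}
 *
 * Note that the error path uses put_device() rather than freeing directly,
 * as required by the NOTE above; foo, foo_release and id are placeholders.
 */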
1440
1441 struct kobject *virtual_device_parent(struct device *dev)
1442 {
1443 static struct kobject *virtual_dir = NULL;
1444
1445 if (!virtual_dir)
1446 virtual_dir = kobject_create_and_add("virtual",
1447 &devices_kset->kobj);
1448
1449 return virtual_dir;
1450 }
1451
1452 struct class_dir {
1453 struct kobject kobj;
1454 struct class *class;
1455 };
1456
1457 #define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
1458
1459 static void class_dir_release(struct kobject *kobj)
1460 {
1461 struct class_dir *dir = to_class_dir(kobj);
1462 kfree(dir);
1463 }
1464
1465 static const
1466 struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
1467 {
1468 struct class_dir *dir = to_class_dir(kobj);
1469 return dir->class->ns_type;
1470 }
1471
1472 static struct kobj_type class_dir_ktype = {
1473 .release = class_dir_release,
1474 .sysfs_ops = &kobj_sysfs_ops,
1475 .child_ns_type = class_dir_child_ns_type
1476 };
1477
1478 static struct kobject *
1479 class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
1480 {
1481 struct class_dir *dir;
1482 int retval;
1483
1484 dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1485 if (!dir)
1486 return ERR_PTR(-ENOMEM);
1487
1488 dir->class = class;
1489 kobject_init(&dir->kobj, &class_dir_ktype);
1490
1491 dir->kobj.kset = &class->p->glue_dirs;
1492
1493 retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
1494 if (retval < 0) {
1495 kobject_put(&dir->kobj);
1496 return ERR_PTR(retval);
1497 }
1498 return &dir->kobj;
1499 }
1500
1501 static DEFINE_MUTEX(gdp_mutex);
1502
1503 static struct kobject *get_device_parent(struct device *dev,
1504 struct device *parent)
1505 {
1506 if (dev->class) {
1507 struct kobject *kobj = NULL;
1508 struct kobject *parent_kobj;
1509 struct kobject *k;
1510
1511 #ifdef CONFIG_BLOCK
1512 /* block disks show up in /sys/block */
1513 if (sysfs_deprecated && dev->class == &block_class) {
1514 if (parent && parent->class == &block_class)
1515 return &parent->kobj;
1516 return &block_class.p->subsys.kobj;
1517 }
1518 #endif
1519
1520 /*
1521 * If we have no parent, we live in "virtual".
1522 * Class-devices with a non class-device as parent, live
1523 * in a "glue" directory to prevent namespace collisions.
1524 */
1525 if (parent == NULL)
1526 parent_kobj = virtual_device_parent(dev);
1527 else if (parent->class && !dev->class->ns_type)
1528 return &parent->kobj;
1529 else
1530 parent_kobj = &parent->kobj;
1531
1532 mutex_lock(&gdp_mutex);
1533
1534 /* find our class-directory at the parent and reference it */
1535 spin_lock(&dev->class->p->glue_dirs.list_lock);
1536 list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
1537 if (k->parent == parent_kobj) {
1538 kobj = kobject_get(k);
1539 break;
1540 }
1541 spin_unlock(&dev->class->p->glue_dirs.list_lock);
1542 if (kobj) {
1543 mutex_unlock(&gdp_mutex);
1544 return kobj;
1545 }
1546
1547 /* or create a new class-directory at the parent device */
1548 k = class_dir_create_and_add(dev->class, parent_kobj);
1549 /* do not emit an uevent for this simple "glue" directory */
1550 mutex_unlock(&gdp_mutex);
1551 return k;
1552 }
1553
1554 /* subsystems can specify a default root directory for their devices */
1555 if (!parent && dev->bus && dev->bus->dev_root)
1556 return &dev->bus->dev_root->kobj;
1557
1558 if (parent)
1559 return &parent->kobj;
1560 return NULL;
1561 }
1562
1563 static inline bool live_in_glue_dir(struct kobject *kobj,
1564 struct device *dev)
1565 {
1566 if (!kobj || !dev->class ||
1567 kobj->kset != &dev->class->p->glue_dirs)
1568 return false;
1569 return true;
1570 }
1571
1572 static inline struct kobject *get_glue_dir(struct device *dev)
1573 {
1574 return dev->kobj.parent;
1575 }
1576
1577 /*
1578 * make sure cleaning up dir as the last step, we need to make
1579 * sure .release handler of kobject is run with holding the
1580 * global lock
1581 */
1582 static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
1583 {
1584 unsigned int ref;
1585
1586 /* see if we live in a "glue" directory */
1587 if (!live_in_glue_dir(glue_dir, dev))
1588 return;
1589
1590 mutex_lock(&gdp_mutex);
1591 /**
1592 * There is a race condition between removing glue directory
1593 * and adding a new device under the glue directory.
1594 *
1595 * CPU1: CPU2:
1596 *
1597 * device_add()
1598 * get_device_parent()
1599 * class_dir_create_and_add()
1600 * kobject_add_internal()
1601 * create_dir() // create glue_dir
1602 *
1603 * device_add()
1604 * get_device_parent()
1605 * kobject_get() // get glue_dir
1606 *
1607 * device_del()
1608 * cleanup_glue_dir()
1609 * kobject_del(glue_dir)
1610 *
1611 * kobject_add()
1612 * kobject_add_internal()
1613 * create_dir() // in glue_dir
1614 * sysfs_create_dir_ns()
1615 * kernfs_create_dir_ns(sd)
1616 *
1617 * sysfs_remove_dir() // glue_dir->sd=NULL
1618 * sysfs_put() // free glue_dir->sd
1619 *
1620 * // sd is freed
1621 * kernfs_new_node(sd)
1622 * kernfs_get(glue_dir)
1623 * kernfs_add_one()
1624 * kernfs_put()
1625 *
1626 * Before CPU1 removes the last child device under the glue dir, if CPU2
1627 * adds a new device under the glue dir, the glue_dir kobject reference
1628 * count will be increased to 2 in kobject_get(k) and CPU2 will have
1629 * called kernfs_create_dir_ns(). Meanwhile, CPU1 calls sysfs_remove_dir()
1630 * and sysfs_put(). This results in glue_dir->sd being freed.
1631 *
1632 * CPU2 will then see a stale, "empty", but still potentially used
1633 * glue dir around in kernfs_new_node().
1634 *
1635 * In order to avoid this happening, we should also make sure that the
1636 * kernfs_node for glue_dir is released in CPU1 only when the refcount
1637 * of the glue_dir kobj is 1.
1638 */
1639 ref = kref_read(&glue_dir->kref);
1640 if (!kobject_has_children(glue_dir) && !--ref)
1641 kobject_del(glue_dir);
1642 kobject_put(glue_dir);
1643 mutex_unlock(&gdp_mutex);
1644 }
1645
1646 static int device_add_class_symlinks(struct device *dev)
1647 {
1648 struct device_node *of_node = dev_of_node(dev);
1649 int error;
1650
1651 if (of_node) {
1652 error = sysfs_create_link(&dev->kobj, &of_node->kobj,"of_node");
1653 if (error)
1654 dev_warn(dev, "Error %d creating of_node link\n",error);
1655 /* An error here doesn't warrant bringing down the device */
1656 }
1657
1658 if (!dev->class)
1659 return 0;
1660
1661 error = sysfs_create_link(&dev->kobj,
1662 &dev->class->p->subsys.kobj,
1663 "subsystem");
1664 if (error)
1665 goto out_devnode;
1666
1667 if (dev->parent && device_is_not_partition(dev)) {
1668 error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
1669 "device");
1670 if (error)
1671 goto out_subsys;
1672 }
1673
1674 #ifdef CONFIG_BLOCK
1675 /* /sys/block has directories and does not need symlinks */
1676 if (sysfs_deprecated && dev->class == &block_class)
1677 return 0;
1678 #endif
1679
1680 /* link in the class directory pointing to the device */
1681 error = sysfs_create_link(&dev->class->p->subsys.kobj,
1682 &dev->kobj, dev_name(dev));
1683 if (error)
1684 goto out_device;
1685
1686 return 0;
1687
1688 out_device:
1689 sysfs_remove_link(&dev->kobj, "device");
1690
1691 out_subsys:
1692 sysfs_remove_link(&dev->kobj, "subsystem");
1693 out_devnode:
1694 sysfs_remove_link(&dev->kobj, "of_node");
1695 return error;
1696 }
1697
1698 static void device_remove_class_symlinks(struct device *dev)
1699 {
1700 if (dev_of_node(dev))
1701 sysfs_remove_link(&dev->kobj, "of_node");
1702
1703 if (!dev->class)
1704 return;
1705
1706 if (dev->parent && device_is_not_partition(dev))
1707 sysfs_remove_link(&dev->kobj, "device");
1708 sysfs_remove_link(&dev->kobj, "subsystem");
1709 #ifdef CONFIG_BLOCK
1710 if (sysfs_deprecated && dev->class == &block_class)
1711 return;
1712 #endif
1713 sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
1714 }
1715
1716 /**
1717 * dev_set_name - set a device name
1718 * @dev: device
1719 * @fmt: format string for the device's name
1720 */
1721 int dev_set_name(struct device *dev, const char *fmt, ...)
1722 {
1723 va_list vargs;
1724 int err;
1725
1726 va_start(vargs, fmt);
1727 err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
1728 va_end(vargs);
1729 return err;
1730 }
1731 EXPORT_SYMBOL_GPL(dev_set_name);
1732
1733 /**
1734 * device_to_dev_kobj - select a /sys/dev/ directory for the device
1735 * @dev: device
1736 *
1737 * By default we select char/ for new entries. Setting class->dev_kobj
1738 * to NULL prevents an entry from being created. class->dev_kobj must
1739 * be set (or cleared) before any devices are registered to the class
1740 * otherwise device_create_sys_dev_entry() and
1741 * device_remove_sys_dev_entry() will disagree about the presence of
1742 * the link.
1743 */
1744 static struct kobject *device_to_dev_kobj(struct device *dev)
1745 {
1746 struct kobject *kobj;
1747
1748 if (dev->class)
1749 kobj = dev->class->dev_kobj;
1750 else
1751 kobj = sysfs_dev_char_kobj;
1752
1753 return kobj;
1754 }
1755
1756 static int device_create_sys_dev_entry(struct device *dev)
1757 {
1758 struct kobject *kobj = device_to_dev_kobj(dev);
1759 int error = 0;
1760 char devt_str[15];
1761
1762 if (kobj) {
1763 format_dev_t(devt_str, dev->devt);
1764 error = sysfs_create_link(kobj, &dev->kobj, devt_str);
1765 }
1766
1767 return error;
1768 }
1769
1770 static void device_remove_sys_dev_entry(struct device *dev)
1771 {
1772 struct kobject *kobj = device_to_dev_kobj(dev);
1773 char devt_str[15];
1774
1775 if (kobj) {
1776 format_dev_t(devt_str, dev->devt);
1777 sysfs_remove_link(kobj, devt_str);
1778 }
1779 }
1780
1781 int device_private_init(struct device *dev)
1782 {
1783 dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
1784 if (!dev->p)
1785 return -ENOMEM;
1786 dev->p->device = dev;
1787 klist_init(&dev->p->klist_children, klist_children_get,
1788 klist_children_put);
1789 INIT_LIST_HEAD(&dev->p->deferred_probe);
1790 return 0;
1791 }
1792
1793 /**
1794 * device_add - add device to device hierarchy.
1795 * @dev: device.
1796 *
1797 * This is part 2 of device_register(), though it may be called
1798 * separately _iff_ device_initialize() has been called separately.
1799 *
1800 * This adds @dev to the kobject hierarchy via kobject_add(), adds it
1801 * to the global and sibling lists for the device, then
1802 * adds it to the other relevant subsystems of the driver model.
1803 *
1804 * Do not call this routine or device_register() more than once for
1805 * any device structure. The driver model core is not designed to work
1806 * with devices that get unregistered and then spring back to life.
1807 * (Among other things, it's very hard to guarantee that all references
1808 * to the previous incarnation of @dev have been dropped.) Allocate
1809 * and register a fresh new struct device instead.
1810 *
1811 * NOTE: _Never_ directly free @dev after calling this function, even
1812 * if it returned an error! Always use put_device() to give up your
1813 * reference instead.
1814 */
1815 int device_add(struct device *dev)
1816 {
1817 struct device *parent;
1818 struct kobject *kobj;
1819 struct class_interface *class_intf;
1820 int error = -EINVAL;
1821 struct kobject *glue_dir = NULL;
1822
1823 dev = get_device(dev);
1824 if (!dev)
1825 goto done;
1826
1827 if (!dev->p) {
1828 error = device_private_init(dev);
1829 if (error)
1830 goto done;
1831 }
1832
1833 /*
1834 * for statically allocated devices, which should all be converted
1835 * some day, we need to initialize the name. We prevent reading back
1836 * the name, and force the use of dev_name()
1837 */
1838 if (dev->init_name) {
1839 dev_set_name(dev, "%s", dev->init_name);
1840 dev->init_name = NULL;
1841 }
1842
1843 /* subsystems can specify simple device enumeration */
1844 if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
1845 dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
1846
1847 if (!dev_name(dev)) {
1848 error = -EINVAL;
1849 goto name_error;
1850 }
1851
1852 pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
1853
1854 parent = get_device(dev->parent);
1855 kobj = get_device_parent(dev, parent);
1856 if (IS_ERR(kobj)) {
1857 error = PTR_ERR(kobj);
1858 goto parent_error;
1859 }
1860 if (kobj)
1861 dev->kobj.parent = kobj;
1862
1863 /* use parent numa_node */
1864 if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
1865 set_dev_node(dev, dev_to_node(parent));
1866
1867 /* first, register with generic layer. */
1868 /* we require the name to be set before, and pass NULL */
1869 error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
1870 if (error) {
1871 glue_dir = get_glue_dir(dev);
1872 goto Error;
1873 }
1874
1875 /* notify platform of device entry */
1876 if (platform_notify)
1877 platform_notify(dev);
1878
1879 error = device_create_file(dev, &dev_attr_uevent);
1880 if (error)
1881 goto attrError;
1882
1883 error = device_add_class_symlinks(dev);
1884 if (error)
1885 goto SymlinkError;
1886 error = device_add_attrs(dev);
1887 if (error)
1888 goto AttrsError;
1889 error = bus_add_device(dev);
1890 if (error)
1891 goto BusError;
1892 error = dpm_sysfs_add(dev);
1893 if (error)
1894 goto DPMError;
1895 device_pm_add(dev);
1896
1897 if (MAJOR(dev->devt)) {
1898 error = device_create_file(dev, &dev_attr_dev);
1899 if (error)
1900 goto DevAttrError;
1901
1902 error = device_create_sys_dev_entry(dev);
1903 if (error)
1904 goto SysEntryError;
1905
1906 devtmpfs_create_node(dev);
1907 }
1908
1909 /* Notify clients of device addition. This call must come
1910 * after dpm_sysfs_add() and before kobject_uevent().
1911 */
1912 if (dev->bus)
1913 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
1914 BUS_NOTIFY_ADD_DEVICE, dev);
1915
1916 kobject_uevent(&dev->kobj, KOBJ_ADD);
1917 bus_probe_device(dev);
1918 if (parent)
1919 klist_add_tail(&dev->p->knode_parent,
1920 &parent->p->klist_children);
1921
1922 if (dev->class) {
1923 mutex_lock(&dev->class->p->mutex);
1924 /* tie the class to the device */
1925 klist_add_tail(&dev->knode_class,
1926 &dev->class->p->klist_devices);
1927
1928 /* notify any interfaces that the device is here */
1929 list_for_each_entry(class_intf,
1930 &dev->class->p->interfaces, node)
1931 if (class_intf->add_dev)
1932 class_intf->add_dev(dev, class_intf);
1933 mutex_unlock(&dev->class->p->mutex);
1934 }
1935 done:
1936 put_device(dev);
1937 return error;
1938 SysEntryError:
1939 if (MAJOR(dev->devt))
1940 device_remove_file(dev, &dev_attr_dev);
1941 DevAttrError:
1942 device_pm_remove(dev);
1943 dpm_sysfs_remove(dev);
1944 DPMError:
1945 bus_remove_device(dev);
1946 BusError:
1947 device_remove_attrs(dev);
1948 AttrsError:
1949 device_remove_class_symlinks(dev);
1950 SymlinkError:
1951 device_remove_file(dev, &dev_attr_uevent);
1952 attrError:
1953 kobject_uevent(&dev->kobj, KOBJ_REMOVE);
1954 glue_dir = get_glue_dir(dev);
1955 kobject_del(&dev->kobj);
1956 Error:
1957 cleanup_glue_dir(dev, glue_dir);
1958 parent_error:
1959 put_device(parent);
1960 name_error:
1961 kfree(dev->p);
1962 dev->p = NULL;
1963 goto done;
1964 }
1965 EXPORT_SYMBOL_GPL(device_add);
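
/*
 * Illustrative sketch (not part of this file): the two-step
 * device_initialize()/device_add() pattern with the error handling the
 * NOTE above requires -- put_device() rather than kfree() on failure.
 * The "foo" structure, foo_release() and foo_bus_type are hypothetical.
 *
 *	static int foo_add_device(struct foo_device *foo, struct device *parent)
 *	{
 *		int err;
 *
 *		device_initialize(&foo->dev);
 *		foo->dev.parent = parent;
 *		foo->dev.bus = &foo_bus_type;
 *		foo->dev.release = foo_release;
 *
 *		err = dev_set_name(&foo->dev, "foo%d", foo->id);
 *		if (err)
 *			goto out_put;
 *
 *		err = device_add(&foo->dev);
 *		if (err)
 *			goto out_put;
 *		return 0;
 *
 *	out_put:
 *		put_device(&foo->dev);	// drops the reference, foo_release() frees foo
 *		return err;
 *	}
 */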
1966
1967 /**
1968 * device_register - register a device with the system.
1969 * @dev: pointer to the device structure
1970 *
1971 * This happens in two clean steps - initialize the device
1972 * and add it to the system. The two steps can be called
1973 * separately, but this is the easiest and most common.
1974 * I.e. you should only call the two helpers separately if you
1975 * have a clearly defined need to use and refcount the device
1976 * before it is added to the hierarchy.
1977 *
1978 * For more information, see the kerneldoc for device_initialize()
1979 * and device_add().
1980 *
1981 * NOTE: _Never_ directly free @dev after calling this function, even
1982 * if it returned an error! Always use put_device() to give up the
1983 * reference initialized in this function instead.
1984 */
1985 int device_register(struct device *dev)
1986 {
1987 device_initialize(dev);
1988 return device_add(dev);
1989 }
1990 EXPORT_SYMBOL_GPL(device_register);
1991
1992 /**
1993 * get_device - increment reference count for device.
1994 * @dev: device.
1995 *
1996 * This simply forwards the call to kobject_get(), though
1997 * we do take care to provide for the case that we get a NULL
1998 * pointer passed in.
1999 */
2000 struct device *get_device(struct device *dev)
2001 {
2002 return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
2003 }
2004 EXPORT_SYMBOL_GPL(get_device);
2005
2006 /**
2007 * put_device - decrement reference count.
2008 * @dev: device in question.
2009 */
2010 void put_device(struct device *dev)
2011 {
2012 /* might_sleep(); */
2013 if (dev)
2014 kobject_put(&dev->kobj);
2015 }
2016 EXPORT_SYMBOL_GPL(put_device);
2017
2018 /**
2019 * device_del - delete device from system.
2020 * @dev: device.
2021 *
2022 * This is the first part of the device unregistration
2023 * sequence. This removes the device from the lists we control
2024 * from here, has it removed from the other driver model
2025 * subsystems it was added to in device_add(), and removes it
2026 * from the kobject hierarchy.
2027 *
2028 * NOTE: this should be called manually _iff_ device_add() was
2029 * also called manually.
2030 */
2031 void device_del(struct device *dev)
2032 {
2033 struct device *parent = dev->parent;
2034 struct kobject *glue_dir = NULL;
2035 struct class_interface *class_intf;
2036
2037 /* Notify clients of device removal. This call must come
2038 * before dpm_sysfs_remove().
2039 */
2040 if (dev->bus)
2041 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
2042 BUS_NOTIFY_DEL_DEVICE, dev);
2043
2044 dpm_sysfs_remove(dev);
2045 if (parent)
2046 klist_del(&dev->p->knode_parent);
2047 if (MAJOR(dev->devt)) {
2048 devtmpfs_delete_node(dev);
2049 device_remove_sys_dev_entry(dev);
2050 device_remove_file(dev, &dev_attr_dev);
2051 }
2052 if (dev->class) {
2053 device_remove_class_symlinks(dev);
2054
2055 mutex_lock(&dev->class->p->mutex);
2056 /* notify any interfaces that the device is now gone */
2057 list_for_each_entry(class_intf,
2058 &dev->class->p->interfaces, node)
2059 if (class_intf->remove_dev)
2060 class_intf->remove_dev(dev, class_intf);
2061 /* remove the device from the class list */
2062 klist_del(&dev->knode_class);
2063 mutex_unlock(&dev->class->p->mutex);
2064 }
2065 device_remove_file(dev, &dev_attr_uevent);
2066 device_remove_attrs(dev);
2067 bus_remove_device(dev);
2068 device_pm_remove(dev);
2069 driver_deferred_probe_del(dev);
2070 device_remove_properties(dev);
2071 device_links_purge(dev);
2072
2073 /* Notify the platform of the removal, in case they
2074 * need to do anything...
2075 */
2076 if (platform_notify_remove)
2077 platform_notify_remove(dev);
2078 if (dev->bus)
2079 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
2080 BUS_NOTIFY_REMOVED_DEVICE, dev);
2081 kobject_uevent(&dev->kobj, KOBJ_REMOVE);
2082 glue_dir = get_glue_dir(dev);
2083 kobject_del(&dev->kobj);
2084 cleanup_glue_dir(dev, glue_dir);
2085 put_device(parent);
2086 }
2087 EXPORT_SYMBOL_GPL(device_del);
2088
2089 /**
2090 * device_unregister - unregister device from system.
2091 * @dev: device going away.
2092 *
2093 * We do this in two parts, like we do device_register(). First,
2094 * we remove it from all the subsystems with device_del(), then
2095 * we decrement the reference count via put_device(). If that
2096 * is the final reference count, the device will be cleaned up
2097 * via device_release() above. Otherwise, the structure will
2098 * stick around until the final reference to the device is dropped.
2099 */
2100 void device_unregister(struct device *dev)
2101 {
2102 pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
2103 device_del(dev);
2104 put_device(dev);
2105 }
2106 EXPORT_SYMBOL_GPL(device_unregister);
2107
2108 static struct device *prev_device(struct klist_iter *i)
2109 {
2110 struct klist_node *n = klist_prev(i);
2111 struct device *dev = NULL;
2112 struct device_private *p;
2113
2114 if (n) {
2115 p = to_device_private_parent(n);
2116 dev = p->device;
2117 }
2118 return dev;
2119 }
2120
2121 static struct device *next_device(struct klist_iter *i)
2122 {
2123 struct klist_node *n = klist_next(i);
2124 struct device *dev = NULL;
2125 struct device_private *p;
2126
2127 if (n) {
2128 p = to_device_private_parent(n);
2129 dev = p->device;
2130 }
2131 return dev;
2132 }
2133
2134 /**
2135 * device_get_devnode - path of device node file
2136 * @dev: device
2137 * @mode: returned file access mode
2138 * @uid: returned file owner
2139 * @gid: returned file group
2140 * @tmp: possibly allocated string
2141 *
2142 * Return the relative path of a possible device node.
2143 * Non-default names may need to allocate memory to compose
2144 * a name. This memory is returned in @tmp and needs to be
2145 * freed by the caller.
2146 */
2147 const char *device_get_devnode(struct device *dev,
2148 umode_t *mode, kuid_t *uid, kgid_t *gid,
2149 const char **tmp)
2150 {
2151 char *s;
2152
2153 *tmp = NULL;
2154
2155 /* the device type may provide a specific name */
2156 if (dev->type && dev->type->devnode)
2157 *tmp = dev->type->devnode(dev, mode, uid, gid);
2158 if (*tmp)
2159 return *tmp;
2160
2161 /* the class may provide a specific name */
2162 if (dev->class && dev->class->devnode)
2163 *tmp = dev->class->devnode(dev, mode);
2164 if (*tmp)
2165 return *tmp;
2166
2167 /* return name without allocation, tmp == NULL */
2168 if (strchr(dev_name(dev), '!') == NULL)
2169 return dev_name(dev);
2170
2171 /* replace '!' in the name with '/' */
2172 s = kstrdup(dev_name(dev), GFP_KERNEL);
2173 if (!s)
2174 return NULL;
2175 strreplace(s, '!', '/');
2176 return *tmp = s;
2177 }
2178
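/*
 * Illustrative sketch (not part of this file): a class ->devnode() callback
 * as consumed by device_get_devnode() above.  The returned string is passed
 * back through *tmp and freed by the caller; the "foo" class is hypothetical.
 *
 *	static char *foo_devnode(struct device *dev, umode_t *mode)
 *	{
 *		if (mode)
 *			*mode = 0600;
 *		return kasprintf(GFP_KERNEL, "foo/%s", dev_name(dev));
 *	}
 *
 *	static struct class foo_class = {
 *		.name    = "foo",
 *		.devnode = foo_devnode,
 *	};
 */
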
2179 /**
2180 * device_for_each_child - device child iterator.
2181 * @parent: parent struct device.
2182 * @fn: function to be called for each device.
2183 * @data: data for the callback.
2184 *
2185 * Iterate over @parent's child devices, and call @fn for each,
2186 * passing it @data.
2187 *
2188 * We check the return of @fn each time. If it returns anything
2189 * other than 0, we break out and return that value.
2190 */
2191 int device_for_each_child(struct device *parent, void *data,
2192 int (*fn)(struct device *dev, void *data))
2193 {
2194 struct klist_iter i;
2195 struct device *child;
2196 int error = 0;
2197
2198 if (!parent->p)
2199 return 0;
2200
2201 klist_iter_init(&parent->p->klist_children, &i);
2202 while ((child = next_device(&i)) && !error)
2203 error = fn(child, data);
2204 klist_iter_exit(&i);
2205 return error;
2206 }
2207 EXPORT_SYMBOL_GPL(device_for_each_child);
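
/*
 * Illustrative sketch (not part of this file): a device_for_each_child()
 * callback; returning a non-zero value stops the iteration, as described
 * above.  The counter passed through @data is hypothetical.
 *
 *	static int foo_count_child(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return 0;	// keep iterating
 *	}
 *
 *	// usage: device_for_each_child(parent, &count, foo_count_child);
 */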
2208
2209 /**
2210 * device_for_each_child_reverse - device child iterator in reversed order.
2211 * @parent: parent struct device.
2212 * @fn: function to be called for each device.
2213 * @data: data for the callback.
2214 *
2215 * Iterate over @parent's child devices, and call @fn for each,
2216 * passing it @data.
2217 *
2218 * We check the return of @fn each time. If it returns anything
2219 * other than 0, we break out and return that value.
2220 */
2221 int device_for_each_child_reverse(struct device *parent, void *data,
2222 int (*fn)(struct device *dev, void *data))
2223 {
2224 struct klist_iter i;
2225 struct device *child;
2226 int error = 0;
2227
2228 if (!parent->p)
2229 return 0;
2230
2231 klist_iter_init(&parent->p->klist_children, &i);
2232 while ((child = prev_device(&i)) && !error)
2233 error = fn(child, data);
2234 klist_iter_exit(&i);
2235 return error;
2236 }
2237 EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
2238
2239 /**
2240 * device_find_child - device iterator for locating a particular device.
2241 * @parent: parent struct device
2242 * @match: Callback function to check device
2243 * @data: Data to pass to match function
2244 *
2245 * This is similar to the device_for_each_child() function above, but it
2246 * returns a reference to a device that is 'found' for later use, as
2247 * determined by the @match callback.
2248 *
2249 * The callback should return 0 if the device doesn't match and non-zero
2250 * if it does. If the callback returns non-zero and a reference to the
2251 * current device can be obtained, this function will return to the caller
2252 * and not iterate over any more devices.
2253 *
2254 * NOTE: you will need to drop the reference with put_device() after use.
2255 */
2256 struct device *device_find_child(struct device *parent, void *data,
2257 int (*match)(struct device *dev, void *data))
2258 {
2259 struct klist_iter i;
2260 struct device *child;
2261
2262 if (!parent)
2263 return NULL;
2264
2265 klist_iter_init(&parent->p->klist_children, &i);
2266 while ((child = next_device(&i)))
2267 if (match(child, data) && get_device(child))
2268 break;
2269 klist_iter_exit(&i);
2270 return child;
2271 }
2272 EXPORT_SYMBOL_GPL(device_find_child);
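
/*
 * Illustrative sketch (not part of this file): locating a child by name and
 * honouring the NOTE above about dropping the reference.  The helper names
 * are hypothetical.
 *
 *	static int foo_match_name(struct device *dev, void *data)
 *	{
 *		return sysfs_streq(dev_name(dev), data);
 *	}
 *
 *	static struct device *foo_find_child(struct device *parent, char *name)
 *	{
 *		// the caller must put_device() the result when done
 *		return device_find_child(parent, name, foo_match_name);
 *	}
 */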
2273
2274 int __init devices_init(void)
2275 {
2276 devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
2277 if (!devices_kset)
2278 return -ENOMEM;
2279 dev_kobj = kobject_create_and_add("dev", NULL);
2280 if (!dev_kobj)
2281 goto dev_kobj_err;
2282 sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
2283 if (!sysfs_dev_block_kobj)
2284 goto block_kobj_err;
2285 sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
2286 if (!sysfs_dev_char_kobj)
2287 goto char_kobj_err;
2288
2289 return 0;
2290
2291 char_kobj_err:
2292 kobject_put(sysfs_dev_block_kobj);
2293 block_kobj_err:
2294 kobject_put(dev_kobj);
2295 dev_kobj_err:
2296 kset_unregister(devices_kset);
2297 return -ENOMEM;
2298 }
2299
2300 static int device_check_offline(struct device *dev, void *not_used)
2301 {
2302 int ret;
2303
2304 ret = device_for_each_child(dev, NULL, device_check_offline);
2305 if (ret)
2306 return ret;
2307
2308 return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
2309 }
2310
2311 /**
2312 * device_offline - Prepare the device for hot-removal.
2313 * @dev: Device to be put offline.
2314 *
2315 * Execute the device bus type's .offline() callback, if present, to prepare
2316 * the device for a subsequent hot-removal. If that succeeds, the device must
2317 * not be used until either it is removed or its bus type's .online() callback
2318 * is executed.
2319 *
2320 * Call under device_hotplug_lock.
2321 */
2322 int device_offline(struct device *dev)
2323 {
2324 int ret;
2325
2326 if (dev->offline_disabled)
2327 return -EPERM;
2328
2329 ret = device_for_each_child(dev, NULL, device_check_offline);
2330 if (ret)
2331 return ret;
2332
2333 device_lock(dev);
2334 if (device_supports_offline(dev)) {
2335 if (dev->offline) {
2336 ret = 1;
2337 } else {
2338 ret = dev->bus->offline(dev);
2339 if (!ret) {
2340 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2341 dev->offline = true;
2342 }
2343 }
2344 }
2345 device_unlock(dev);
2346
2347 return ret;
2348 }
2349
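/*
 * Illustrative sketch (not part of this file): offlining a device ahead of
 * hot-removal while honouring the device_hotplug_lock rule documented above;
 * @dev is assumed to be a valid, referenced device.
 *
 *	static int foo_try_offline(struct device *dev)
 *	{
 *		int ret;
 *
 *		lock_device_hotplug();
 *		ret = device_offline(dev);	// 1 means it was already offline
 *		unlock_device_hotplug();
 *
 *		return ret < 0 ? ret : 0;
 *	}
 */
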
2350 /**
2351 * device_online - Put the device back online after successful device_offline().
2352 * @dev: Device to be put back online.
2353 *
2354 * If device_offline() has been successfully executed for @dev, but the device
2355 * has not been removed subsequently, execute its bus type's .online() callback
2356 * to indicate that the device can be used again.
2357 *
2358 * Call under device_hotplug_lock.
2359 */
2360 int device_online(struct device *dev)
2361 {
2362 int ret = 0;
2363
2364 device_lock(dev);
2365 if (device_supports_offline(dev)) {
2366 if (dev->offline) {
2367 ret = dev->bus->online(dev);
2368 if (!ret) {
2369 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2370 dev->offline = false;
2371 }
2372 } else {
2373 ret = 1;
2374 }
2375 }
2376 device_unlock(dev);
2377
2378 return ret;
2379 }
2380
2381 struct root_device {
2382 struct device dev;
2383 struct module *owner;
2384 };
2385
2386 static inline struct root_device *to_root_device(struct device *d)
2387 {
2388 return container_of(d, struct root_device, dev);
2389 }
2390
2391 static void root_device_release(struct device *dev)
2392 {
2393 kfree(to_root_device(dev));
2394 }
2395
2396 /**
2397 * __root_device_register - allocate and register a root device
2398 * @name: root device name
2399 * @owner: owner module of the root device, usually THIS_MODULE
2400 *
2401 * This function allocates a root device and registers it
2402 * using device_register(). In order to free the returned
2403 * device, use root_device_unregister().
2404 *
2405 * Root devices are dummy devices which allow other devices
2406 * to be grouped under /sys/devices. Use this function to
2407 * allocate a root device and then use it as the parent of
2408 * any device which should appear under /sys/devices/{name}
2409 *
2410 * The /sys/devices/{name} directory will also contain a
2411 * 'module' symlink which points to the @owner directory
2412 * in sysfs.
2413 *
2414 * Returns &struct device pointer on success, or ERR_PTR() on error.
2415 *
2416 * Note: You probably want to use root_device_register().
2417 */
2418 struct device *__root_device_register(const char *name, struct module *owner)
2419 {
2420 struct root_device *root;
2421 int err = -ENOMEM;
2422
2423 root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
2424 if (!root)
2425 return ERR_PTR(err);
2426
2427 err = dev_set_name(&root->dev, "%s", name);
2428 if (err) {
2429 kfree(root);
2430 return ERR_PTR(err);
2431 }
2432
2433 root->dev.release = root_device_release;
2434
2435 err = device_register(&root->dev);
2436 if (err) {
2437 put_device(&root->dev);
2438 return ERR_PTR(err);
2439 }
2440
2441 #ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */
2442 if (owner) {
2443 struct module_kobject *mk = &owner->mkobj;
2444
2445 err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
2446 if (err) {
2447 device_unregister(&root->dev);
2448 return ERR_PTR(err);
2449 }
2450 root->owner = owner;
2451 }
2452 #endif
2453
2454 return &root->dev;
2455 }
2456 EXPORT_SYMBOL_GPL(__root_device_register);
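
/*
 * Illustrative sketch (not part of this file): the usual root_device_register()
 * wrapper (which passes THIS_MODULE as @owner) used to group virtual "foo"
 * devices under /sys/devices/foo, with IS_ERR()-style error handling as
 * documented above.
 *
 *	static struct device *foo_root;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_root = root_device_register("foo");
 *		return PTR_ERR_OR_ZERO(foo_root);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		root_device_unregister(foo_root);
 *	}
 */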
2457
2458 /**
2459 * root_device_unregister - unregister and free a root device
2460 * @dev: device going away
2461 *
2462 * This function unregisters and cleans up a device that was created by
2463 * root_device_register().
2464 */
2465 void root_device_unregister(struct device *dev)
2466 {
2467 struct root_device *root = to_root_device(dev);
2468
2469 if (root->owner)
2470 sysfs_remove_link(&root->dev.kobj, "module");
2471
2472 device_unregister(dev);
2473 }
2474 EXPORT_SYMBOL_GPL(root_device_unregister);
2475
2476
2477 static void device_create_release(struct device *dev)
2478 {
2479 pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
2480 kfree(dev);
2481 }
2482
2483 static struct device *
2484 device_create_groups_vargs(struct class *class, struct device *parent,
2485 dev_t devt, void *drvdata,
2486 const struct attribute_group **groups,
2487 const char *fmt, va_list args)
2488 {
2489 struct device *dev = NULL;
2490 int retval = -ENODEV;
2491
2492 if (class == NULL || IS_ERR(class))
2493 goto error;
2494
2495 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2496 if (!dev) {
2497 retval = -ENOMEM;
2498 goto error;
2499 }
2500
2501 device_initialize(dev);
2502 dev->devt = devt;
2503 dev->class = class;
2504 dev->parent = parent;
2505 dev->groups = groups;
2506 dev->release = device_create_release;
2507 dev_set_drvdata(dev, drvdata);
2508
2509 retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
2510 if (retval)
2511 goto error;
2512
2513 retval = device_add(dev);
2514 if (retval)
2515 goto error;
2516
2517 return dev;
2518
2519 error:
2520 put_device(dev);
2521 return ERR_PTR(retval);
2522 }
2523
2524 /**
2525 * device_create_vargs - creates a device and registers it with sysfs
2526 * @class: pointer to the struct class that this device should be registered to
2527 * @parent: pointer to the parent struct device of this new device, if any
2528 * @devt: the dev_t for the char device to be added
2529 * @drvdata: the data to be added to the device for callbacks
2530 * @fmt: string for the device's name
2531 * @args: va_list for the device's name
2532 *
2533 * This function can be used by char device classes. A struct device
2534 * will be created in sysfs, registered to the specified class.
2535 *
2536 * A "dev" file will be created, showing the dev_t for the device, if
2537 * the dev_t is not 0,0.
2538 * If a pointer to a parent struct device is passed in, the newly created
2539 * struct device will be a child of that device in sysfs.
2540 * The pointer to the struct device will be returned from the call.
2541 * Any further sysfs files that might be required can be created using this
2542 * pointer.
2543 *
2544 * Returns &struct device pointer on success, or ERR_PTR() on error.
2545 *
2546 * Note: the struct class passed to this function must have previously
2547 * been created with a call to class_create().
2548 */
2549 struct device *device_create_vargs(struct class *class, struct device *parent,
2550 dev_t devt, void *drvdata, const char *fmt,
2551 va_list args)
2552 {
2553 return device_create_groups_vargs(class, parent, devt, drvdata, NULL,
2554 fmt, args);
2555 }
2556 EXPORT_SYMBOL_GPL(device_create_vargs);
2557
2558 /**
2559 * device_create - creates a device and registers it with sysfs
2560 * @class: pointer to the struct class that this device should be registered to
2561 * @parent: pointer to the parent struct device of this new device, if any
2562 * @devt: the dev_t for the char device to be added
2563 * @drvdata: the data to be added to the device for callbacks
2564 * @fmt: string for the device's name
2565 *
2566 * This function can be used by char device classes. A struct device
2567 * will be created in sysfs, registered to the specified class.
2568 *
2569 * A "dev" file will be created, showing the dev_t for the device, if
2570 * the dev_t is not 0,0.
2571 * If a pointer to a parent struct device is passed in, the newly created
2572 * struct device will be a child of that device in sysfs.
2573 * The pointer to the struct device will be returned from the call.
2574 * Any further sysfs files that might be required can be created using this
2575 * pointer.
2576 *
2577 * Returns &struct device pointer on success, or ERR_PTR() on error.
2578 *
2579 * Note: the struct class passed to this function must have previously
2580 * been created with a call to class_create().
2581 */
2582 struct device *device_create(struct class *class, struct device *parent,
2583 dev_t devt, void *drvdata, const char *fmt, ...)
2584 {
2585 va_list vargs;
2586 struct device *dev;
2587
2588 va_start(vargs, fmt);
2589 dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs);
2590 va_end(vargs);
2591 return dev;
2592 }
2593 EXPORT_SYMBOL_GPL(device_create);
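
/*
 * Illustrative sketch (not part of this file): a minimal char-device class
 * using device_create()/device_destroy().  foo_class, foo_devt and the cdev
 * setup are hypothetical and assumed to be handled elsewhere (e.g. via
 * alloc_chrdev_region() and cdev_add()).
 *
 *	static struct class *foo_class;
 *	static dev_t foo_devt;
 *
 *	static int foo_create_node(void)
 *	{
 *		struct device *dev;
 *
 *		foo_class = class_create(THIS_MODULE, "foo");
 *		if (IS_ERR(foo_class))
 *			return PTR_ERR(foo_class);
 *
 *		dev = device_create(foo_class, NULL, foo_devt, NULL, "foo%d",
 *				    MINOR(foo_devt));
 *		if (IS_ERR(dev)) {
 *			class_destroy(foo_class);
 *			return PTR_ERR(dev);
 *		}
 *		return 0;
 *	}
 *
 *	static void foo_destroy_node(void)
 *	{
 *		device_destroy(foo_class, foo_devt);
 *		class_destroy(foo_class);
 *	}
 */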
2594
2595 /**
2596 * device_create_with_groups - creates a device and registers it with sysfs
2597 * @class: pointer to the struct class that this device should be registered to
2598 * @parent: pointer to the parent struct device of this new device, if any
2599 * @devt: the dev_t for the char device to be added
2600 * @drvdata: the data to be added to the device for callbacks
2601 * @groups: NULL-terminated list of attribute groups to be created
2602 * @fmt: string for the device's name
2603 *
2604 * This function can be used by char device classes. A struct device
2605 * will be created in sysfs, registered to the specified class.
2606 * Additional attributes specified in the groups parameter will also
2607 * be created automatically.
2608 *
2609 * A "dev" file will be created, showing the dev_t for the device, if
2610 * the dev_t is not 0,0.
2611 * If a pointer to a parent struct device is passed in, the newly created
2612 * struct device will be a child of that device in sysfs.
2613 * The pointer to the struct device will be returned from the call.
2614 * Any further sysfs files that might be required can be created using this
2615 * pointer.
2616 *
2617 * Returns &struct device pointer on success, or ERR_PTR() on error.
2618 *
2619 * Note: the struct class passed to this function must have previously
2620 * been created with a call to class_create().
2621 */
2622 struct device *device_create_with_groups(struct class *class,
2623 struct device *parent, dev_t devt,
2624 void *drvdata,
2625 const struct attribute_group **groups,
2626 const char *fmt, ...)
2627 {
2628 va_list vargs;
2629 struct device *dev;
2630
2631 va_start(vargs, fmt);
2632 dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
2633 fmt, vargs);
2634 va_end(vargs);
2635 return dev;
2636 }
2637 EXPORT_SYMBOL_GPL(device_create_with_groups);
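
/*
 * Illustrative sketch (not part of this file): registering the attribute
 * groups together with the device avoids creating sysfs files after the
 * KOBJ_ADD uevent has already been sent.  The "mode" attribute and the
 * foo_groups array generated by ATTRIBUTE_GROUPS() are hypothetical.
 *
 *	static ssize_t mode_show(struct device *dev,
 *				 struct device_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", 0);
 *	}
 *	static DEVICE_ATTR_RO(mode);
 *
 *	static struct attribute *foo_attrs[] = {
 *		&dev_attr_mode.attr,
 *		NULL
 *	};
 *	ATTRIBUTE_GROUPS(foo);
 *
 *	// dev = device_create_with_groups(foo_class, parent, devt, drvdata,
 *	//				   foo_groups, "foo%d", id);
 */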
2638
2639 static int __match_devt(struct device *dev, const void *data)
2640 {
2641 const dev_t *devt = data;
2642
2643 return dev->devt == *devt;
2644 }
2645
2646 /**
2647 * device_destroy - removes a device that was created with device_create()
2648 * @class: pointer to the struct class that this device was registered with
2649 * @devt: the dev_t of the device that was previously registered
2650 *
2651 * This call unregisters and cleans up a device that was created with a
2652 * call to device_create().
2653 */
2654 void device_destroy(struct class *class, dev_t devt)
2655 {
2656 struct device *dev;
2657
2658 dev = class_find_device(class, NULL, &devt, __match_devt);
2659 if (dev) {
2660 put_device(dev);
2661 device_unregister(dev);
2662 }
2663 }
2664 EXPORT_SYMBOL_GPL(device_destroy);
2665
2666 /**
2667 * device_rename - renames a device
2668 * @dev: the pointer to the struct device to be renamed
2669 * @new_name: the new name of the device
2670 *
2671 * It is the responsibility of the caller to provide mutual
2672 * exclusion between two different calls of device_rename
2673 * on the same device to ensure that new_name is valid and
2674 * won't conflict with other devices.
2675 *
2676 * Note: Don't call this function. Currently, the networking layer calls this
2677 * function, but that will change. The following text from Kay Sievers offers
2678 * some insight:
2679 *
2680 * Renaming devices is racy at many levels, symlinks and other stuff are not
2681 * replaced atomically, and you get a "move" uevent, but it's not easy to
2682 * connect the event to the old and new device. Device nodes are not renamed at
2683 * all, there isn't even support for that in the kernel now.
2684 *
2685 * In the meantime, during renaming, your target name might be taken by another
2686 * driver, creating conflicts. Or the old name is taken directly after you
2687 * renamed it -- then you get events for the same DEVPATH, before you even see
2688 * the "move" event. It's just a mess, and nothing new should ever rely on
2689 * kernel device renaming. Besides that, it's not even implemented now for
2690 * other things than (driver-core wise very simple) network devices.
2691 *
2692 * We are currently about to change network renaming in udev to completely
2693 * disallow renaming of devices in the same namespace as the kernel uses,
2694 * because we can't solve the problems properly, that arise with swapping names
2695 * of multiple interfaces without races. Means, renaming of eth[0-9]* will only
2696 * be allowed to some other name than eth[0-9]*, for the aforementioned
2697 * reasons.
2698 *
2699 * Make up a "real" name in the driver before you register anything, or add
2700 * some other attributes for userspace to find the device, or use udev to add
2701 * symlinks -- but never rename kernel devices later, it's a complete mess. We
2702 * don't even want to get into that and try to implement the missing pieces in
2703 * the core. We really have other pieces to fix in the driver core mess. :)
2704 */
2705 int device_rename(struct device *dev, const char *new_name)
2706 {
2707 struct kobject *kobj = &dev->kobj;
2708 char *old_device_name = NULL;
2709 int error;
2710
2711 dev = get_device(dev);
2712 if (!dev)
2713 return -EINVAL;
2714
2715 dev_dbg(dev, "renaming to %s\n", new_name);
2716
2717 old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
2718 if (!old_device_name) {
2719 error = -ENOMEM;
2720 goto out;
2721 }
2722
2723 if (dev->class) {
2724 error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
2725 kobj, old_device_name,
2726 new_name, kobject_namespace(kobj));
2727 if (error)
2728 goto out;
2729 }
2730
2731 error = kobject_rename(kobj, new_name);
2732 if (error)
2733 goto out;
2734
2735 out:
2736 put_device(dev);
2737
2738 kfree(old_device_name);
2739
2740 return error;
2741 }
2742 EXPORT_SYMBOL_GPL(device_rename);
2743
2744 static int device_move_class_links(struct device *dev,
2745 struct device *old_parent,
2746 struct device *new_parent)
2747 {
2748 int error = 0;
2749
2750 if (old_parent)
2751 sysfs_remove_link(&dev->kobj, "device");
2752 if (new_parent)
2753 error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
2754 "device");
2755 return error;
2756 }
2757
2758 /**
2759 * device_move - moves a device to a new parent
2760 * @dev: the pointer to the struct device to be moved
2761 * @new_parent: the new parent of the device (can be NULL)
2762 * @dpm_order: how to reorder the dpm_list
2763 */
2764 int device_move(struct device *dev, struct device *new_parent,
2765 enum dpm_order dpm_order)
2766 {
2767 int error;
2768 struct device *old_parent;
2769 struct kobject *new_parent_kobj;
2770
2771 dev = get_device(dev);
2772 if (!dev)
2773 return -EINVAL;
2774
2775 device_pm_lock();
2776 new_parent = get_device(new_parent);
2777 new_parent_kobj = get_device_parent(dev, new_parent);
2778 if (IS_ERR(new_parent_kobj)) {
2779 error = PTR_ERR(new_parent_kobj);
2780 put_device(new_parent);
2781 goto out;
2782 }
2783
2784 pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
2785 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
2786 error = kobject_move(&dev->kobj, new_parent_kobj);
2787 if (error) {
2788 cleanup_glue_dir(dev, new_parent_kobj);
2789 put_device(new_parent);
2790 goto out;
2791 }
2792 old_parent = dev->parent;
2793 dev->parent = new_parent;
2794 if (old_parent)
2795 klist_remove(&dev->p->knode_parent);
2796 if (new_parent) {
2797 klist_add_tail(&dev->p->knode_parent,
2798 &new_parent->p->klist_children);
2799 set_dev_node(dev, dev_to_node(new_parent));
2800 }
2801
2802 if (dev->class) {
2803 error = device_move_class_links(dev, old_parent, new_parent);
2804 if (error) {
2805 /* We ignore errors on cleanup since we're hosed anyway... */
2806 device_move_class_links(dev, new_parent, old_parent);
2807 if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
2808 if (new_parent)
2809 klist_remove(&dev->p->knode_parent);
2810 dev->parent = old_parent;
2811 if (old_parent) {
2812 klist_add_tail(&dev->p->knode_parent,
2813 &old_parent->p->klist_children);
2814 set_dev_node(dev, dev_to_node(old_parent));
2815 }
2816 }
2817 cleanup_glue_dir(dev, new_parent_kobj);
2818 put_device(new_parent);
2819 goto out;
2820 }
2821 }
2822 switch (dpm_order) {
2823 case DPM_ORDER_NONE:
2824 break;
2825 case DPM_ORDER_DEV_AFTER_PARENT:
2826 device_pm_move_after(dev, new_parent);
2827 devices_kset_move_after(dev, new_parent);
2828 break;
2829 case DPM_ORDER_PARENT_BEFORE_DEV:
2830 device_pm_move_before(new_parent, dev);
2831 devices_kset_move_before(new_parent, dev);
2832 break;
2833 case DPM_ORDER_DEV_LAST:
2834 device_pm_move_last(dev);
2835 devices_kset_move_last(dev);
2836 break;
2837 }
2838
2839 put_device(old_parent);
2840 out:
2841 device_pm_unlock();
2842 put_device(dev);
2843 return error;
2844 }
2845 EXPORT_SYMBOL_GPL(device_move);
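
/*
 * Illustrative sketch (not part of this file): reparenting a device while
 * keeping the PM ordering consistent with the new topology; dev and
 * new_parent are assumed to be valid, referenced devices.
 *
 *	err = device_move(dev, new_parent, DPM_ORDER_DEV_AFTER_PARENT);
 *	if (err)
 *		dev_err(dev, "failed to move device: %d\n", err);
 */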
2846
2847 /**
2848 * device_shutdown - call ->shutdown() on each device to shutdown.
2849 */
2850 void device_shutdown(void)
2851 {
2852 struct device *dev, *parent;
2853
2854 wait_for_device_probe();
2855 device_block_probing();
2856
2857 cpufreq_suspend();
2858
2859 spin_lock(&devices_kset->list_lock);
2860 /*
2861 * Walk the devices list backward, shutting down each in turn.
2862 * Beware that device unplug events may also start pulling
2863 * devices offline, even as the system is shutting down.
2864 */
2865 while (!list_empty(&devices_kset->list)) {
2866 dev = list_entry(devices_kset->list.prev, struct device,
2867 kobj.entry);
2868
2869 /*
2870 * hold a reference to the device's parent to
2871 * prevent it from being freed while the parent's
2872 * lock is held below
2873 */
2874 parent = get_device(dev->parent);
2875 get_device(dev);
2876 /*
2877 * Make sure the device is off the kset list, in the
2878 * event that dev->*->shutdown() doesn't remove it.
2879 */
2880 list_del_init(&dev->kobj.entry);
2881 spin_unlock(&devices_kset->list_lock);
2882
2883 /* hold lock to avoid race with probe/release */
2884 if (parent)
2885 device_lock(parent);
2886 device_lock(dev);
2887
2888 /* Don't allow any more runtime suspends */
2889 pm_runtime_get_noresume(dev);
2890 pm_runtime_barrier(dev);
2891
2892 if (dev->class && dev->class->shutdown_pre) {
2893 if (initcall_debug)
2894 dev_info(dev, "shutdown_pre\n");
2895 dev->class->shutdown_pre(dev);
2896 }
2897 if (dev->bus && dev->bus->shutdown) {
2898 if (initcall_debug)
2899 dev_info(dev, "shutdown\n");
2900 dev->bus->shutdown(dev);
2901 } else if (dev->driver && dev->driver->shutdown) {
2902 if (initcall_debug)
2903 dev_info(dev, "shutdown\n");
2904 dev->driver->shutdown(dev);
2905 }
2906
2907 device_unlock(dev);
2908 if (parent)
2909 device_unlock(parent);
2910
2911 put_device(dev);
2912 put_device(parent);
2913
2914 spin_lock(&devices_kset->list_lock);
2915 }
2916 spin_unlock(&devices_kset->list_lock);
2917 }
2918
2919 /*
2920 * Device logging functions
2921 */
2922
2923 #ifdef CONFIG_PRINTK
2924 static int
2925 create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
2926 {
2927 const char *subsys;
2928 size_t pos = 0;
2929
2930 if (dev->class)
2931 subsys = dev->class->name;
2932 else if (dev->bus)
2933 subsys = dev->bus->name;
2934 else
2935 return 0;
2936
2937 pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys);
2938 if (pos >= hdrlen)
2939 goto overflow;
2940
2941 /*
2942 * Add device identifier DEVICE=:
2943 * b12:8 block dev_t
2944 * c127:3 char dev_t
2945 * n8 netdev ifindex
2946 * +sound:card0 subsystem:devname
2947 */
2948 if (MAJOR(dev->devt)) {
2949 char c;
2950
2951 if (strcmp(subsys, "block") == 0)
2952 c = 'b';
2953 else
2954 c = 'c';
2955 pos++;
2956 pos += snprintf(hdr + pos, hdrlen - pos,
2957 "DEVICE=%c%u:%u",
2958 c, MAJOR(dev->devt), MINOR(dev->devt));
2959 } else if (strcmp(subsys, "net") == 0) {
2960 struct net_device *net = to_net_dev(dev);
2961
2962 pos++;
2963 pos += snprintf(hdr + pos, hdrlen - pos,
2964 "DEVICE=n%u", net->ifindex);
2965 } else {
2966 pos++;
2967 pos += snprintf(hdr + pos, hdrlen - pos,
2968 "DEVICE=+%s:%s", subsys, dev_name(dev));
2969 }
2970
2971 if (pos >= hdrlen)
2972 goto overflow;
2973
2974 return pos;
2975
2976 overflow:
2977 dev_WARN(dev, "device/subsystem name too long");
2978 return 0;
2979 }
2980
2981 int dev_vprintk_emit(int level, const struct device *dev,
2982 const char *fmt, va_list args)
2983 {
2984 char hdr[128];
2985 size_t hdrlen;
2986
2987 hdrlen = create_syslog_header(dev, hdr, sizeof(hdr));
2988
2989 return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args);
2990 }
2991 EXPORT_SYMBOL(dev_vprintk_emit);
2992
2993 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
2994 {
2995 va_list args;
2996 int r;
2997
2998 va_start(args, fmt);
2999
3000 r = dev_vprintk_emit(level, dev, fmt, args);
3001
3002 va_end(args);
3003
3004 return r;
3005 }
3006 EXPORT_SYMBOL(dev_printk_emit);
3007
3008 static void __dev_printk(const char *level, const struct device *dev,
3009 struct va_format *vaf)
3010 {
3011 if (dev)
3012 dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
3013 dev_driver_string(dev), dev_name(dev), vaf);
3014 else
3015 printk("%s(NULL device *): %pV", level, vaf);
3016 }
3017
3018 void dev_printk(const char *level, const struct device *dev,
3019 const char *fmt, ...)
3020 {
3021 struct va_format vaf;
3022 va_list args;
3023
3024 va_start(args, fmt);
3025
3026 vaf.fmt = fmt;
3027 vaf.va = &args;
3028
3029 __dev_printk(level, dev, &vaf);
3030
3031 va_end(args);
3032 }
3033 EXPORT_SYMBOL(dev_printk);
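
/*
 * Illustrative sketch (not part of this file): the dev_*() helpers defined
 * below are the usual way drivers end up in __dev_printk(); the message is
 * prefixed with the driver and device name, and dev_vprintk_emit() attaches
 * the SUBSYSTEM=/DEVICE= fields built by create_syslog_header() as
 * structured metadata.
 *
 *	dev_info(dev, "link is up\n");
 *	dev_err(dev, "I/O error %d on sector %llu\n", err, sector);
 */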
3034
3035 #define define_dev_printk_level(func, kern_level) \
3036 void func(const struct device *dev, const char *fmt, ...) \
3037 { \
3038 struct va_format vaf; \
3039 va_list args; \
3040 \
3041 va_start(args, fmt); \
3042 \
3043 vaf.fmt = fmt; \
3044 vaf.va = &args; \
3045 \
3046 __dev_printk(kern_level, dev, &vaf); \
3047 \
3048 va_end(args); \
3049 } \
3050 EXPORT_SYMBOL(func);
3051
3052 define_dev_printk_level(dev_emerg, KERN_EMERG);
3053 define_dev_printk_level(dev_alert, KERN_ALERT);
3054 define_dev_printk_level(dev_crit, KERN_CRIT);
3055 define_dev_printk_level(dev_err, KERN_ERR);
3056 define_dev_printk_level(dev_warn, KERN_WARNING);
3057 define_dev_printk_level(dev_notice, KERN_NOTICE);
3058 define_dev_printk_level(_dev_info, KERN_INFO);
3059
3060 #endif
3061
3062 static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
3063 {
3064 return fwnode && !IS_ERR(fwnode->secondary);
3065 }
3066
3067 /**
3068 * set_primary_fwnode - Change the primary firmware node of a given device.
3069 * @dev: Device to handle.
3070 * @fwnode: New primary firmware node of the device.
3071 *
3072 * Set the device's firmware node pointer to @fwnode, but if a secondary
3073 * firmware node of the device is present, preserve it.
3074 */
3075 void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
3076 {
3077 if (fwnode) {
3078 struct fwnode_handle *fn = dev->fwnode;
3079
3080 if (fwnode_is_primary(fn))
3081 fn = fn->secondary;
3082
3083 if (fn) {
3084 WARN_ON(fwnode->secondary);
3085 fwnode->secondary = fn;
3086 }
3087 dev->fwnode = fwnode;
3088 } else {
3089 dev->fwnode = fwnode_is_primary(dev->fwnode) ?
3090 dev->fwnode->secondary : NULL;
3091 }
3092 }
3093 EXPORT_SYMBOL_GPL(set_primary_fwnode);
3094
3095 /**
3096 * set_secondary_fwnode - Change the secondary firmware node of a given device.
3097 * @dev: Device to handle.
3098 * @fwnode: New secondary firmware node of the device.
3099 *
3100 * If a primary firmware node of the device is present, set its secondary
3101 * pointer to @fwnode. Otherwise, set the device's firmware node pointer to
3102 * @fwnode.
3103 */
3104 void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
3105 {
3106 if (fwnode)
3107 fwnode->secondary = ERR_PTR(-ENODEV);
3108
3109 if (fwnode_is_primary(dev->fwnode))
3110 dev->fwnode->secondary = fwnode;
3111 else
3112 dev->fwnode = fwnode;
3113 }
3114
3115 /**
3116 * device_set_of_node_from_dev - reuse device-tree node of another device
3117 * @dev: device whose device-tree node is being set
3118 * @dev2: device whose device-tree node is being reused
3119 *
3120 * Takes another reference to the new device-tree node after first dropping
3121 * any reference held to the old node.
3122 */
3123 void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
3124 {
3125 of_node_put(dev->of_node);
3126 dev->of_node = of_node_get(dev2->of_node);
3127 dev->of_node_reused = true;
3128 }
3129 EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
3130