• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/core.c - core driver model code (device registration, etc)
4  *
5  * Copyright (c) 2002-3 Patrick Mochel
6  * Copyright (c) 2002-3 Open Source Development Labs
7  * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
8  * Copyright (c) 2006 Novell, Inc.
9  */
10 
11 #include <linux/acpi.h>
12 #include <linux/cpufreq.h>
13 #include <linux/device.h>
14 #include <linux/err.h>
15 #include <linux/fwnode.h>
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/slab.h>
19 #include <linux/string.h>
20 #include <linux/kdev_t.h>
21 #include <linux/notifier.h>
22 #include <linux/of.h>
23 #include <linux/of_device.h>
24 #include <linux/genhd.h>
25 #include <linux/mutex.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/netdevice.h>
28 #include <linux/sched/signal.h>
29 #include <linux/sched/mm.h>
30 #include <linux/swiotlb.h>
31 #include <linux/sysfs.h>
32 
33 #include "base.h"
34 #include "power/power.h"
35 
#ifdef CONFIG_SYSFS_DEPRECATED
/*
 * Whether the deprecated sysfs layout is in effect.  The default comes
 * from Kconfig (CONFIG_SYSFS_DEPRECATED_V2) and can be overridden at
 * boot time with the "sysfs.deprecated" parameter parsed below.
 */
#ifdef CONFIG_SYSFS_DEPRECATED_V2
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
/* Parse the "sysfs.deprecated=<0|1>" early boot parameter. */
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
48 
/* Device links support. */

/* Devices whose sync_state() invocation has been deferred. */
static LIST_HEAD(deferred_sync);
/*
 * While nonzero, sync_state() calls are being deferred.  Starts at 1;
 * presumably dropped once initial device population completes — the
 * releasing site is not in this chunk, confirm against the rest of file.
 */
static unsigned int defer_sync_state_count = 1;
/* Protects the fwnode link lists (fwnode->suppliers / fwnode->consumers). */
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
54 
55 /**
56  * fwnode_link_add - Create a link between two fwnode_handles.
57  * @con: Consumer end of the link.
58  * @sup: Supplier end of the link.
59  *
60  * Create a fwnode link between fwnode handles @con and @sup. The fwnode link
61  * represents the detail that the firmware lists @sup fwnode as supplying a
62  * resource to @con.
63  *
64  * The driver core will use the fwnode link to create a device link between the
65  * two device objects corresponding to @con and @sup when they are created. The
66  * driver core will automatically delete the fwnode link between @con and @sup
67  * after doing that.
68  *
69  * Attempts to create duplicate links between the same pair of fwnode handles
70  * are ignored and there is no reference counting.
71  */
fwnode_link_add(struct fwnode_handle * con,struct fwnode_handle * sup)72 int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
73 {
74 	struct fwnode_link *link;
75 	int ret = 0;
76 
77 	mutex_lock(&fwnode_link_lock);
78 
79 	list_for_each_entry(link, &sup->consumers, s_hook)
80 		if (link->consumer == con)
81 			goto out;
82 
83 	link = kzalloc(sizeof(*link), GFP_KERNEL);
84 	if (!link) {
85 		ret = -ENOMEM;
86 		goto out;
87 	}
88 
89 	link->supplier = sup;
90 	INIT_LIST_HEAD(&link->s_hook);
91 	link->consumer = con;
92 	INIT_LIST_HEAD(&link->c_hook);
93 
94 	list_add(&link->s_hook, &sup->consumers);
95 	list_add(&link->c_hook, &con->suppliers);
96 out:
97 	mutex_unlock(&fwnode_link_lock);
98 
99 	return ret;
100 }
101 
102 /**
103  * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
104  * @fwnode: fwnode whose supplier links need to be deleted
105  *
106  * Deletes all supplier links connecting directly to @fwnode.
107  */
fwnode_links_purge_suppliers(struct fwnode_handle * fwnode)108 static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
109 {
110 	struct fwnode_link *link, *tmp;
111 
112 	mutex_lock(&fwnode_link_lock);
113 	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
114 		list_del(&link->s_hook);
115 		list_del(&link->c_hook);
116 		kfree(link);
117 	}
118 	mutex_unlock(&fwnode_link_lock);
119 }
120 
121 /**
122  * fwnode_links_purge_consumers - Delete all consumer links of fwnode_handle.
123  * @fwnode: fwnode whose consumer links need to be deleted
124  *
125  * Deletes all consumer links connecting directly to @fwnode.
126  */
fwnode_links_purge_consumers(struct fwnode_handle * fwnode)127 static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
128 {
129 	struct fwnode_link *link, *tmp;
130 
131 	mutex_lock(&fwnode_link_lock);
132 	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
133 		list_del(&link->s_hook);
134 		list_del(&link->c_hook);
135 		kfree(link);
136 	}
137 	mutex_unlock(&fwnode_link_lock);
138 }
139 
/**
 * fwnode_links_purge - Delete all links connected to a fwnode_handle.
 * @fwnode: fwnode whose links needs to be deleted
 *
 * Deletes all links connecting directly to a fwnode, in both directions
 * (links where @fwnode is the consumer as well as those where it is the
 * supplier).
 */
void fwnode_links_purge(struct fwnode_handle *fwnode)
{
	fwnode_links_purge_suppliers(fwnode);
	fwnode_links_purge_consumers(fwnode);
}
151 
/*
 * Mark @fwnode and its available descendants that never got a struct device
 * as FWNODE_FLAG_NOT_DEVICE and drop their consumer fwnode links, so that
 * consumers stop waiting on suppliers that will never materialize.
 */
static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;

	/* Don't purge consumer links of an added child */
	if (fwnode->dev)
		return;

	fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
	fwnode_links_purge_consumers(fwnode);

	/* Recurse into the subtree; nodes with a device cut off the walk. */
	fwnode_for_each_available_child_node(fwnode, child)
		fw_devlink_purge_absent_suppliers(child);
}
166 
#ifdef CONFIG_SRCU
/*
 * With SRCU available, writers serialize on a mutex while readers use
 * SRCU read-side critical sections; link removal must then wait for an
 * SRCU grace period (device_link_synchronize_removal()) before freeing.
 */
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

int device_links_read_lock(void) __acquires(&device_links_srcu)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
	srcu_read_unlock(&device_links_srcu, idx);
}

int device_links_read_lock_held(void)
{
	return srcu_read_lock_held(&device_links_srcu);
}

/* Wait until all current SRCU readers have dropped their link references. */
static void device_link_synchronize_removal(void)
{
	synchronize_srcu(&device_links_srcu);
}

/* RCU-safe unlinking so concurrent SRCU readers keep a valid view. */
static void device_link_remove_from_lists(struct device_link *link)
{
	list_del_rcu(&link->s_node);
	list_del_rcu(&link->c_node);
}
#else /* !CONFIG_SRCU */
/*
 * Without SRCU, fall back to a plain rwsem: readers exclude writers, so
 * removal needs no grace-period synchronization and plain list_del() is
 * sufficient.
 */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int device_links_read_lock_held(void)
{
	return lockdep_is_held(&device_links_lock);
}
#endif

static inline void device_link_synchronize_removal(void)
{
}

static void device_link_remove_from_lists(struct device_link *link)
{
	list_del(&link->s_node);
	list_del(&link->c_node);
}
#endif /* !CONFIG_SRCU */
247 
device_is_ancestor(struct device * dev,struct device * target)248 static bool device_is_ancestor(struct device *dev, struct device *target)
249 {
250 	while (target->parent) {
251 		target = target->parent;
252 		if (dev == target)
253 			return true;
254 	}
255 	return false;
256 }
257 
/**
 * device_is_dependent - Check if one device depends on another one
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child or
 * its consumer etc).  Return 1 if that is the case or 0 otherwise.
 */
int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	/*
	 * The "ancestors" check is needed to catch the case when the target
	 * device has not been completely initialized yet and it is still
	 * missing from the list of children of its parent device.
	 */
	if (dev == target || device_is_ancestor(dev, target))
		return 1;

	/* Recurse into children: a dependency of a child is one of @dev. */
	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* Managed SYNC_STATE_ONLY links are not real dependencies. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (link->consumer == target)
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}
297 
/*
 * Derive the initial state of a managed device link from the current
 * driver-binding status of its supplier and consumer devices.
 */
static void device_link_init_status(struct device_link *link,
				    struct device *consumer,
				    struct device *supplier)
{
	switch (supplier->links.status) {
	case DL_DEV_PROBING:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			/*
			 * A consumer driver can create a link to a supplier
			 * that has not completed its probing yet as long as it
			 * knows that the supplier is already functional (for
			 * example, it has just acquired some resources from the
			 * supplier).
			 */
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
		break;
	case DL_DEV_DRIVER_BOUND:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			/* Supplier ready; consumer currently probing. */
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		case DL_DEV_DRIVER_BOUND:
			/* Both ends have drivers bound. */
			link->status = DL_STATE_ACTIVE;
			break;
		default:
			/* Supplier ready, consumer has no driver yet. */
			link->status = DL_STATE_AVAILABLE;
			break;
		}
		break;
	case DL_DEV_UNBINDING:
		link->status = DL_STATE_SUPPLIER_UNBIND;
		break;
	default:
		link->status = DL_STATE_DORMANT;
		break;
	}
}
341 
/*
 * Move @dev, then all of its children and consumers, to the ends of
 * devices_kset and dpm_list.  Recursive worker for device_pm_move_to_tail()
 * and for the reordering done in device_link_add().
 */
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* SYNC_STATE_ONLY links impose no suspend/resume ordering. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;
		device_reorder_to_tail(link->consumer, NULL);
	}

	return 0;
}
366 
/**
 * device_pm_move_to_tail - Move set of devices to the end of device lists
 * @dev: Device to move
 *
 * This is a device_reorder_to_tail() wrapper taking the requisite locks.
 *
 * It moves the @dev along with all of its children and all of its consumers
 * to the ends of the device_kset and dpm_list, recursively.
 */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	/* Lock order: device links read lock first, then the dpm_list lock. */
	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}
386 
387 #define to_devlink(dev)	container_of((dev), struct device_link, link_dev)
388 
status_show(struct device * dev,struct device_attribute * attr,char * buf)389 static ssize_t status_show(struct device *dev,
390 			   struct device_attribute *attr, char *buf)
391 {
392 	const char *output;
393 
394 	switch (to_devlink(dev)->status) {
395 	case DL_STATE_NONE:
396 		output = "not tracked";
397 		break;
398 	case DL_STATE_DORMANT:
399 		output = "dormant";
400 		break;
401 	case DL_STATE_AVAILABLE:
402 		output = "available";
403 		break;
404 	case DL_STATE_CONSUMER_PROBE:
405 		output = "consumer probing";
406 		break;
407 	case DL_STATE_ACTIVE:
408 		output = "active";
409 		break;
410 	case DL_STATE_SUPPLIER_UNBIND:
411 		output = "supplier unbinding";
412 		break;
413 	default:
414 		output = "unknown";
415 		break;
416 	}
417 
418 	return sysfs_emit(buf, "%s\n", output);
419 }
420 static DEVICE_ATTR_RO(status);
421 
auto_remove_on_show(struct device * dev,struct device_attribute * attr,char * buf)422 static ssize_t auto_remove_on_show(struct device *dev,
423 				   struct device_attribute *attr, char *buf)
424 {
425 	struct device_link *link = to_devlink(dev);
426 	const char *output;
427 
428 	if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
429 		output = "supplier unbind";
430 	else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
431 		output = "consumer unbind";
432 	else
433 		output = "never";
434 
435 	return sysfs_emit(buf, "%s\n", output);
436 }
437 static DEVICE_ATTR_RO(auto_remove_on);
438 
runtime_pm_show(struct device * dev,struct device_attribute * attr,char * buf)439 static ssize_t runtime_pm_show(struct device *dev,
440 			       struct device_attribute *attr, char *buf)
441 {
442 	struct device_link *link = to_devlink(dev);
443 
444 	return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
445 }
446 static DEVICE_ATTR_RO(runtime_pm);
447 
sync_state_only_show(struct device * dev,struct device_attribute * attr,char * buf)448 static ssize_t sync_state_only_show(struct device *dev,
449 				    struct device_attribute *attr, char *buf)
450 {
451 	struct device_link *link = to_devlink(dev);
452 
453 	return sysfs_emit(buf, "%d\n",
454 			  !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
455 }
456 static DEVICE_ATTR_RO(sync_state_only);
457 
/* sysfs attributes exposed by every device link's class device. */
static struct attribute *devlink_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_auto_remove_on.attr,
	&dev_attr_runtime_pm.attr,
	&dev_attr_sync_state_only.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devlink);
466 
/*
 * Deferred final teardown of a device link, run from the "long" workqueue
 * (see devlink_dev_release()).  Must wait for all readers before dropping
 * the device references and freeing the link.
 */
static void device_link_release_fn(struct work_struct *work)
{
	struct device_link *link = container_of(work, struct device_link, rm_work);

	/* Ensure that all references to the link object have been dropped. */
	device_link_synchronize_removal();

	pm_runtime_release_supplier(link);
	pm_request_idle(link->supplier);

	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}
481 
/* Class release callback for devlink devices: defer teardown to a work item. */
static void devlink_dev_release(struct device *dev)
{
	struct device_link *link = to_devlink(dev);

	INIT_WORK(&link->rm_work, device_link_release_fn);
	/*
	 * It may take a while to complete this work because of the SRCU
	 * synchronization in device_link_release_fn() and if the consumer or
	 * supplier devices get deleted when it runs, so put it into the "long"
	 * workqueue.
	 */
	queue_work(system_long_wq, &link->rm_work);
}
495 
/* Device class backing the /sys/class/devlink/... link devices. */
static struct class devlink_class = {
	.name = "devlink",
	.owner = THIS_MODULE,
	.dev_groups = devlink_groups,
	.dev_release = devlink_dev_release,
};
502 
/*
 * Class-interface add callback: create the four sysfs symlinks for a new
 * device link - "supplier"/"consumer" under the link device, plus
 * "consumer:<bus>:<name>" under the supplier and "supplier:<bus>:<name>"
 * under the consumer.  On failure, links created so far are unwound.
 *
 * Returns 0 on success or a negative errno.
 */
static int devlink_add_symlinks(struct device *dev,
				struct class_interface *class_intf)
{
	int ret;
	size_t len;
	struct device_link *link = to_devlink(dev);
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	/*
	 * Buffer must fit "supplier:<bus>:<name>" (or the consumer variant,
	 * same prefix length) for the longer of the two devices, plus NUL.
	 */
	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
	if (ret)
		goto out;

	ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
	if (ret)
		goto err_con;

	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_con_dev;

	snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
	ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_sup_dev;

	goto out;

err_sup_dev:
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
	sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
	sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
	kfree(buf);
	return ret;
}
552 
/*
 * Class-interface remove callback: tear down the sysfs symlinks created by
 * devlink_add_symlinks().  Best effort - if the name buffer cannot be
 * allocated, only the links under the link device are removed and a
 * warning is emitted.
 */
static void devlink_remove_symlinks(struct device *dev,
				   struct class_interface *class_intf)
{
	struct device_link *link = to_devlink(dev);
	size_t len;
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	sysfs_remove_link(&link->link_dev.kobj, "consumer");
	sysfs_remove_link(&link->link_dev.kobj, "supplier");

	/* Same sizing as in devlink_add_symlinks(). */
	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		WARN(1, "Unable to properly free device link symlinks!\n");
		return;
	}

	/* The consumer may already be gone; only touch it if still present. */
	if (device_is_registered(con)) {
		snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
		sysfs_remove_link(&con->kobj, buf);
	}
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
	kfree(buf);
}
583 
/* Hook symlink creation/removal into devlink device add/remove events. */
static struct class_interface devlink_class_intf = {
	.class = &devlink_class,
	.add_dev = devlink_add_symlinks,
	.remove_dev = devlink_remove_symlinks,
};
589 
devlink_class_init(void)590 static int __init devlink_class_init(void)
591 {
592 	int ret;
593 
594 	ret = class_register(&devlink_class);
595 	if (ret)
596 		return ret;
597 
598 	ret = class_interface_register(&devlink_class_intf);
599 	if (ret)
600 		class_unregister(&devlink_class);
601 
602 	return ret;
603 }
604 postcore_initcall(devlink_class_init);
605 
/* Flags that only make sense on driver-core-managed device links. */
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
			       DL_FLAG_AUTOPROBE_CONSUMER  | \
			       DL_FLAG_SYNC_STATE_ONLY | \
			       DL_FLAG_INFERRED)

/* The full set of flags a device_link_add() caller may pass in. */
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
614 
/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * The caller is responsible for the proper synchronization of the link creation
 * with runtime PM.  First, setting the DL_FLAG_PM_RUNTIME flag will cause the
 * runtime PM framework to take the link into account.  Second, if the
 * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
 * be forced into the active metastate and reference-counted upon the creation
 * of the link.  If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
 * ignored.
 *
 * If DL_FLAG_STATELESS is set in @flags, the caller of this function is
 * expected to release the link returned by it directly with the help of either
 * device_link_del() or device_link_remove().
 *
 * If that flag is not set, however, the caller of this function is handing the
 * management of the link over to the driver core entirely and its return value
 * can only be used to check whether or not the link is present.  In that case,
 * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
 * flags can be used to indicate to the driver core when the link can be safely
 * deleted.  Namely, setting one of them in @flags indicates to the driver core
 * that the link is not going to be used (by the given caller of this function)
 * after unbinding the consumer or supplier driver, respectively, from its
 * device, so the link can be deleted at that point.  If none of them is set,
 * the link will be maintained until one of the devices pointed to by it (either
 * the consumer or the supplier) is unregistered.
 *
 * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
 * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
 * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
 * be used to request the driver core to automatically probe for a consumer
 * driver after successfully binding a driver to the supplier device.
 *
 * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
 * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
 * the same time is invalid and will cause NULL to be returned upfront.
 * However, if a device link between the given @consumer and @supplier pair
 * exists already when this function is called for them, the existing link will
 * be returned regardless of its current type and status (the link's flags may
 * be modified then).  The caller of this function is then expected to treat
 * the link as though it has just been created, so (in particular) if
 * DL_FLAG_STATELESS was passed in @flags, the link needs to be released
 * explicitly when not needed any more (as stated above).
 *
 * A side effect of the link creation is re-ordering of dpm_list and the
 * devices_kset list by moving the consumer device and all devices depending
 * on it to the ends of these lists (that does not happen to devices that have
 * not been registered when this function is called).
 *
 * The supplier device is required to be registered when this function is called
 * and NULL will be returned if that is not the case.  The consumer device need
 * not be registered, however.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	/* Reject invalid endpoints and inconsistent flag combinations. */
	if (!consumer || !supplier || consumer == supplier ||
	    flags & ~DL_ADD_VALID_FLAGS ||
	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
	     (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	/*
	 * Resume the supplier up front if requested; the reference taken
	 * here is dropped at the end if no link ends up being created.
	 */
	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	if (!(flags & DL_FLAG_STATELESS))
		flags |= DL_FLAG_MANAGED;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
	 * the supplier already in the graph, return NULL. If the link is a
	 * SYNC_STATE_ONLY link, we don't check for reverse dependencies
	 * because it only affects sync_state() callbacks.
	 */
	if (!device_pm_initialized(supplier)
	    || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
		  device_is_dependent(consumer, supplier))) {
		link = NULL;
		goto out;
	}

	/*
	 * SYNC_STATE_ONLY links are useless once a consumer device has probed.
	 * So, only create it if the consumer hasn't probed yet.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY &&
	    consumer->links.status != DL_DEV_NO_DRIVER &&
	    consumer->links.status != DL_DEV_PROBING) {
		link = NULL;
		goto out;
	}

	/*
	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
	 * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
	 */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	/* If a link between this pair already exists, update and reuse it. */
	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		/* An explicit request overrides a previously inferred link. */
		if (link->flags & DL_FLAG_INFERRED &&
		    !(flags & DL_FLAG_INFERRED))
			link->flags &= ~DL_FLAG_INFERRED;

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			kref_get(&link->kref);
			if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
			    !(link->flags & DL_FLAG_STATELESS)) {
				link->flags |= DL_FLAG_STATELESS;
				goto reorder;
			} else {
				link->flags |= DL_FLAG_STATELESS;
				goto out;
			}
		}

		/*
		 * If the life time of the link following from the new flags is
		 * longer than indicated by the flags of the existing link,
		 * update the existing link to stay around longer.
		 */
		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
			}
		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
					 DL_FLAG_AUTOREMOVE_SUPPLIER);
		}
		if (!(link->flags & DL_FLAG_MANAGED)) {
			kref_get(&link->kref);
			link->flags |= DL_FLAG_MANAGED;
			device_link_init_status(link, consumer, supplier);
		}
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
			goto reorder;
		}

		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	refcount_set(&link->rpm_active, 1);

	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	link->link_dev.class = &devlink_class;
	device_set_pm_not_required(&link->link_dev);
	dev_set_name(&link->link_dev, "%s:%s--%s:%s",
		     dev_bus_name(supplier), dev_name(supplier),
		     dev_bus_name(consumer), dev_name(consumer));
	if (device_register(&link->link_dev)) {
		/* device_register() failure path: drop our sole reference. */
		put_device(&link->link_dev);
		link = NULL;
		goto out;
	}

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE)
			refcount_inc(&link->rpm_active);

		pm_runtime_new_link(consumer);
	}

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS)
		link->status = DL_STATE_NONE;
	else
		device_link_init_status(link, consumer, supplier);

	/*
	 * Some callers expect the link creation during consumer driver probe to
	 * resume the supplier even without DL_FLAG_RPM_ACTIVE.
	 */
	if (link->status == DL_STATE_CONSUMER_PROBE &&
	    flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_resume(supplier);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	if (flags & DL_FLAG_SYNC_STATE_ONLY) {
		dev_dbg(consumer,
			"Linked as a sync state only consumer to %s\n",
			dev_name(supplier));
		goto out;
	}

reorder:
	/*
	 * Move the consumer and all of the devices depending on it to the end
	 * of dpm_list and the devices_kset list.
	 *
	 * It is necessary to hold dpm_list locked throughout all that or else
	 * we may end up suspending with a wrong ordering of it.
	 */
	device_reorder_to_tail(consumer, NULL);

	dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

out:
	device_pm_unlock();
	device_links_write_unlock();

	/* No link was created: drop the RPM reference taken at the top. */
	if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
		pm_runtime_put(supplier);

	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
868 
/*
 * Final kref release for a device link: detach it from both devices' link
 * lists and unregister its class device.  The remaining teardown (grace
 * period, reference drops, kfree) happens in devlink_dev_release() /
 * device_link_release_fn().  Called with the device links write lock held
 * by the kref_put() callers in device_link_put_kref().
 */
static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_dbg(link->consumer, "Dropping the link to %s\n",
		dev_name(link->supplier));

	pm_runtime_drop_link(link);

	device_link_remove_from_lists(link);
	device_unregister(&link->link_dev);
}
881 
device_link_put_kref(struct device_link * link)882 static void device_link_put_kref(struct device_link *link)
883 {
884 	if (link->flags & DL_FLAG_STATELESS)
885 		kref_put(&link->kref, __device_link_del);
886 	else if (!device_is_registered(link->consumer))
887 		__device_link_del(&link->kref);
888 	else
889 		WARN(1, "Unable to drop a managed device link reference\n");
890 }
891 
/**
 * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.  If the link was added multiple times, it needs to be deleted as often.
 * Care is required for hotplugged devices:  Their links are purged on removal
 * and calling device_link_del() is then no longer allowed.
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_link_put_kref(link);
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
908 
909 /**
910  * device_link_remove - Delete a stateless link between two devices.
911  * @consumer: Consumer end of the link.
912  * @supplier: Supplier end of the link.
913  *
914  * The caller must ensure proper synchronization of this function with runtime
915  * PM.
916  */
device_link_remove(void * consumer,struct device * supplier)917 void device_link_remove(void *consumer, struct device *supplier)
918 {
919 	struct device_link *link;
920 
921 	if (WARN_ON(consumer == supplier))
922 		return;
923 
924 	device_links_write_lock();
925 
926 	list_for_each_entry(link, &supplier->links.consumers, s_node) {
927 		if (link->consumer == consumer) {
928 			device_link_put_kref(link);
929 			break;
930 		}
931 	}
932 
933 	device_links_write_unlock();
934 }
935 EXPORT_SYMBOL_GPL(device_link_remove);
936 
/*
 * Roll @dev's supplier links back out of the "consumer probing" state:
 * to AVAILABLE when the supplier's driver is bound, otherwise (which the
 * WARN_ON asserts can only happen for SYNC_STATE_ONLY links) to DORMANT.
 */
static void device_links_missing_supplier(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->status != DL_STATE_CONSUMER_PROBE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}
}
953 
954 /**
955  * device_links_check_suppliers - Check presence of supplier drivers.
956  * @dev: Consumer device.
957  *
958  * Check links from this device to any suppliers.  Walk the list of the device's
959  * links to suppliers and see if all of them are available.  If not, simply
960  * return -EPROBE_DEFER.
961  *
962  * We need to guarantee that the supplier will not go away after the check has
963  * been positive here.  It only can go away in __device_release_driver() and
964  * that function  checks the device's links to consumers.  This means we need to
965  * mark the link as "consumer probe in progress" to make the supplier removal
966  * wait for us to complete (or bad things may happen).
967  *
968  * Links without the DL_FLAG_MANAGED flag set are ignored.
969  */
device_links_check_suppliers(struct device * dev)970 int device_links_check_suppliers(struct device *dev)
971 {
972 	struct device_link *link;
973 	int ret = 0;
974 
975 	/*
976 	 * Device waiting for supplier to become available is not allowed to
977 	 * probe.
978 	 */
979 	mutex_lock(&fwnode_link_lock);
980 	if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
981 	    !fw_devlink_is_permissive()) {
982 		dev_dbg(dev, "probe deferral - wait for supplier %pfwP\n",
983 			list_first_entry(&dev->fwnode->suppliers,
984 			struct fwnode_link,
985 			c_hook)->supplier);
986 		mutex_unlock(&fwnode_link_lock);
987 		return -EPROBE_DEFER;
988 	}
989 	mutex_unlock(&fwnode_link_lock);
990 
991 	device_links_write_lock();
992 
993 	list_for_each_entry(link, &dev->links.suppliers, c_node) {
994 		if (!(link->flags & DL_FLAG_MANAGED))
995 			continue;
996 
997 		if (link->status != DL_STATE_AVAILABLE &&
998 		    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
999 			device_links_missing_supplier(dev);
1000 			dev_dbg(dev, "probe deferral - supplier %s not ready\n",
1001 				dev_name(link->supplier));
1002 			ret = -EPROBE_DEFER;
1003 			break;
1004 		}
1005 		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
1006 	}
1007 	dev->links.status = DL_DEV_PROBING;
1008 
1009 	device_links_write_unlock();
1010 	return ret;
1011 }
1012 
1013 /**
1014  * __device_links_queue_sync_state - Queue a device for sync_state() callback
1015  * @dev: Device to call sync_state() on
1016  * @list: List head to queue the @dev on
1017  *
1018  * Queues a device for a sync_state() callback when the device links write lock
1019  * isn't held. This allows the sync_state() execution flow to use device links
1020  * APIs.  The caller must ensure this function is called with
1021  * device_links_write_lock() held.
1022  *
1023  * This function does a get_device() to make sure the device is not freed while
1024  * on this list.
1025  *
1026  * So the caller must also ensure that device_links_flush_sync_list() is called
1027  * as soon as the caller releases device_links_write_lock().  This is necessary
1028  * to make sure the sync_state() is called in a timely fashion and the
1029  * put_device() is called on this device.
1030  */
__device_links_queue_sync_state(struct device * dev,struct list_head * list)1031 static void __device_links_queue_sync_state(struct device *dev,
1032 					    struct list_head *list)
1033 {
1034 	struct device_link *link;
1035 
1036 	if (!dev_has_sync_state(dev))
1037 		return;
1038 	if (dev->state_synced)
1039 		return;
1040 
1041 	list_for_each_entry(link, &dev->links.consumers, s_node) {
1042 		if (!(link->flags & DL_FLAG_MANAGED))
1043 			continue;
1044 		if (link->status != DL_STATE_ACTIVE)
1045 			return;
1046 	}
1047 
1048 	/*
1049 	 * Set the flag here to avoid adding the same device to a list more
1050 	 * than once. This can happen if new consumers get added to the device
1051 	 * and probed before the list is flushed.
1052 	 */
1053 	dev->state_synced = true;
1054 
1055 	if (WARN_ON(!list_empty(&dev->links.defer_sync)))
1056 		return;
1057 
1058 	get_device(dev);
1059 	list_add_tail(&dev->links.defer_sync, list);
1060 }
1061 
1062 /**
1063  * device_links_flush_sync_list - Call sync_state() on a list of devices
1064  * @list: List of devices to call sync_state() on
1065  * @dont_lock_dev: Device for which lock is already held by the caller
1066  *
1067  * Calls sync_state() on all the devices that have been queued for it. This
1068  * function is used in conjunction with __device_links_queue_sync_state(). The
1069  * @dont_lock_dev parameter is useful when this function is called from a
1070  * context where a device lock is already held.
1071  */
device_links_flush_sync_list(struct list_head * list,struct device * dont_lock_dev)1072 static void device_links_flush_sync_list(struct list_head *list,
1073 					 struct device *dont_lock_dev)
1074 {
1075 	struct device *dev, *tmp;
1076 
1077 	list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
1078 		list_del_init(&dev->links.defer_sync);
1079 
1080 		if (dev != dont_lock_dev)
1081 			device_lock(dev);
1082 
1083 		if (dev->bus->sync_state)
1084 			dev->bus->sync_state(dev);
1085 		else if (dev->driver && dev->driver->sync_state)
1086 			dev->driver->sync_state(dev);
1087 
1088 		if (dev != dont_lock_dev)
1089 			device_unlock(dev);
1090 
1091 		put_device(dev);
1092 	}
1093 }
1094 
/*
 * Defer sync_state() callbacks until a matching
 * device_links_supplier_sync_state_resume() call.  Nestable: each pause
 * bumps defer_sync_state_count under the device links write lock.
 */
void device_links_supplier_sync_state_pause(void)
{
	device_links_write_lock();
	defer_sync_state_count++;
	device_links_write_unlock();
}
1101 
/*
 * Undo one device_links_supplier_sync_state_pause().  When the last pause is
 * dropped, move every device parked on deferred_sync onto a local list and
 * issue the pending sync_state() callbacks outside the write lock.
 */
void device_links_supplier_sync_state_resume(void)
{
	struct device *dev, *tmp;
	LIST_HEAD(sync_list);

	device_links_write_lock();
	if (!defer_sync_state_count) {
		WARN(true, "Unmatched sync_state pause/resume!");
		goto out;
	}
	defer_sync_state_count--;
	if (defer_sync_state_count)
		goto out;

	list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
		/*
		 * Delete from deferred_sync list before queuing it to
		 * sync_list because defer_sync is used for both lists.
		 */
		list_del_init(&dev->links.defer_sync);
		__device_links_queue_sync_state(dev, &sync_list);
	}
out:
	device_links_write_unlock();

	/* Must run after the write lock is released; may be a no-op. */
	device_links_flush_sync_list(&sync_list, NULL);
}
1129 
/*
 * defer_sync_state_count starts at 1 (see its definition), so sync_state()
 * callbacks are held off during early boot.  Release that initial pause once
 * late initcalls run and builtin drivers have had a chance to probe.
 */
static int sync_state_resume_initcall(void)
{
	device_links_supplier_sync_state_resume();
	return 0;
}
late_initcall(sync_state_resume_initcall);
1136 
__device_links_supplier_defer_sync(struct device * sup)1137 static void __device_links_supplier_defer_sync(struct device *sup)
1138 {
1139 	if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
1140 		list_add_tail(&sup->links.defer_sync, &deferred_sync);
1141 }
1142 
/*
 * Demote @link from managed to stateless bookkeeping and drop the reference
 * the driver core held on it.  WRITE_ONCE pairs with lockless readers of
 * link->status; the kref_put() may free the link via __device_link_del().
 */
static void device_link_drop_managed(struct device_link *link)
{
	link->flags &= ~DL_FLAG_MANAGED;
	WRITE_ONCE(link->status, DL_STATE_NONE);
	kref_put(&link->kref, __device_link_del);
}
1149 
waiting_for_supplier_show(struct device * dev,struct device_attribute * attr,char * buf)1150 static ssize_t waiting_for_supplier_show(struct device *dev,
1151 					 struct device_attribute *attr,
1152 					 char *buf)
1153 {
1154 	bool val;
1155 
1156 	device_lock(dev);
1157 	val = !list_empty(&dev->fwnode->suppliers);
1158 	device_unlock(dev);
1159 	return sysfs_emit(buf, "%u\n", val);
1160 }
1161 static DEVICE_ATTR_RO(waiting_for_supplier);
1162 
1163 /**
1164  * device_links_driver_bound - Update device links after probing its driver.
1165  * @dev: Device to update the links for.
1166  *
1167  * The probe has been successful, so update links from this device to any
1168  * consumers by changing their status to "available".
1169  *
1170  * Also change the status of @dev's links to suppliers to "active".
1171  *
1172  * Links without the DL_FLAG_MANAGED flag set are ignored.
1173  */
device_links_driver_bound(struct device * dev)1174 void device_links_driver_bound(struct device *dev)
1175 {
1176 	struct device_link *link, *ln;
1177 	LIST_HEAD(sync_list);
1178 
1179 	/*
1180 	 * If a device binds successfully, it's expected to have created all
1181 	 * the device links it needs to or make new device links as it needs
1182 	 * them. So, fw_devlink no longer needs to create device links to any
1183 	 * of the device's suppliers.
1184 	 *
1185 	 * Also, if a child firmware node of this bound device is not added as
1186 	 * a device by now, assume it is never going to be added and make sure
1187 	 * other devices don't defer probe indefinitely by waiting for such a
1188 	 * child device.
1189 	 */
1190 	if (dev->fwnode && dev->fwnode->dev == dev) {
1191 		struct fwnode_handle *child;
1192 		fwnode_links_purge_suppliers(dev->fwnode);
1193 		fwnode_for_each_available_child_node(dev->fwnode, child)
1194 			fw_devlink_purge_absent_suppliers(child);
1195 	}
1196 	device_remove_file(dev, &dev_attr_waiting_for_supplier);
1197 
1198 	device_links_write_lock();
1199 
1200 	list_for_each_entry(link, &dev->links.consumers, s_node) {
1201 		if (!(link->flags & DL_FLAG_MANAGED))
1202 			continue;
1203 
1204 		/*
1205 		 * Links created during consumer probe may be in the "consumer
1206 		 * probe" state to start with if the supplier is still probing
1207 		 * when they are created and they may become "active" if the
1208 		 * consumer probe returns first.  Skip them here.
1209 		 */
1210 		if (link->status == DL_STATE_CONSUMER_PROBE ||
1211 		    link->status == DL_STATE_ACTIVE)
1212 			continue;
1213 
1214 		WARN_ON(link->status != DL_STATE_DORMANT);
1215 		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
1216 
1217 		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
1218 			driver_deferred_probe_add(link->consumer);
1219 	}
1220 
1221 	if (defer_sync_state_count)
1222 		__device_links_supplier_defer_sync(dev);
1223 	else
1224 		__device_links_queue_sync_state(dev, &sync_list);
1225 
1226 	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
1227 		struct device *supplier;
1228 
1229 		if (!(link->flags & DL_FLAG_MANAGED))
1230 			continue;
1231 
1232 		supplier = link->supplier;
1233 		if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
1234 			/*
1235 			 * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
1236 			 * other DL_MANAGED_LINK_FLAGS have been set. So, it's
1237 			 * save to drop the managed link completely.
1238 			 */
1239 			device_link_drop_managed(link);
1240 		} else {
1241 			WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
1242 			WRITE_ONCE(link->status, DL_STATE_ACTIVE);
1243 		}
1244 
1245 		/*
1246 		 * This needs to be done even for the deleted
1247 		 * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
1248 		 * device link that was preventing the supplier from getting a
1249 		 * sync_state() call.
1250 		 */
1251 		if (defer_sync_state_count)
1252 			__device_links_supplier_defer_sync(supplier);
1253 		else
1254 			__device_links_queue_sync_state(supplier, &sync_list);
1255 	}
1256 
1257 	dev->links.status = DL_DEV_DRIVER_BOUND;
1258 
1259 	device_links_write_unlock();
1260 
1261 	device_links_flush_sync_list(&sync_list, dev);
1262 }
1263 
1264 /**
1265  * __device_links_no_driver - Update links of a device without a driver.
1266  * @dev: Device without a drvier.
1267  *
1268  * Delete all non-persistent links from this device to any suppliers.
1269  *
1270  * Persistent links stay around, but their status is changed to "available",
1271  * unless they already are in the "supplier unbind in progress" state in which
1272  * case they need not be updated.
1273  *
1274  * Links without the DL_FLAG_MANAGED flag set are ignored.
1275  */
__device_links_no_driver(struct device * dev)1276 static void __device_links_no_driver(struct device *dev)
1277 {
1278 	struct device_link *link, *ln;
1279 
1280 	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
1281 		if (!(link->flags & DL_FLAG_MANAGED))
1282 			continue;
1283 
1284 		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
1285 			device_link_drop_managed(link);
1286 			continue;
1287 		}
1288 
1289 		if (link->status != DL_STATE_CONSUMER_PROBE &&
1290 		    link->status != DL_STATE_ACTIVE)
1291 			continue;
1292 
1293 		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
1294 			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
1295 		} else {
1296 			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
1297 			WRITE_ONCE(link->status, DL_STATE_DORMANT);
1298 		}
1299 	}
1300 
1301 	dev->links.status = DL_DEV_NO_DRIVER;
1302 }
1303 
1304 /**
1305  * device_links_no_driver - Update links after failing driver probe.
1306  * @dev: Device whose driver has just failed to probe.
1307  *
1308  * Clean up leftover links to consumers for @dev and invoke
1309  * %__device_links_no_driver() to update links to suppliers for it as
1310  * appropriate.
1311  *
1312  * Links without the DL_FLAG_MANAGED flag set are ignored.
1313  */
device_links_no_driver(struct device * dev)1314 void device_links_no_driver(struct device *dev)
1315 {
1316 	struct device_link *link;
1317 
1318 	device_links_write_lock();
1319 
1320 	list_for_each_entry(link, &dev->links.consumers, s_node) {
1321 		if (!(link->flags & DL_FLAG_MANAGED))
1322 			continue;
1323 
1324 		/*
1325 		 * The probe has failed, so if the status of the link is
1326 		 * "consumer probe" or "active", it must have been added by
1327 		 * a probing consumer while this device was still probing.
1328 		 * Change its state to "dormant", as it represents a valid
1329 		 * relationship, but it is not functionally meaningful.
1330 		 */
1331 		if (link->status == DL_STATE_CONSUMER_PROBE ||
1332 		    link->status == DL_STATE_ACTIVE)
1333 			WRITE_ONCE(link->status, DL_STATE_DORMANT);
1334 	}
1335 
1336 	__device_links_no_driver(dev);
1337 
1338 	device_links_write_unlock();
1339 }
1340 
1341 /**
1342  * device_links_driver_cleanup - Update links after driver removal.
1343  * @dev: Device whose driver has just gone away.
1344  *
1345  * Update links to consumers for @dev by changing their status to "dormant" and
1346  * invoke %__device_links_no_driver() to update links to suppliers for it as
1347  * appropriate.
1348  *
1349  * Links without the DL_FLAG_MANAGED flag set are ignored.
1350  */
device_links_driver_cleanup(struct device * dev)1351 void device_links_driver_cleanup(struct device *dev)
1352 {
1353 	struct device_link *link, *ln;
1354 
1355 	device_links_write_lock();
1356 
1357 	list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
1358 		if (!(link->flags & DL_FLAG_MANAGED))
1359 			continue;
1360 
1361 		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
1362 		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
1363 
1364 		/*
1365 		 * autoremove the links between this @dev and its consumer
1366 		 * devices that are not active, i.e. where the link state
1367 		 * has moved to DL_STATE_SUPPLIER_UNBIND.
1368 		 */
1369 		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
1370 		    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
1371 			device_link_drop_managed(link);
1372 
1373 		WRITE_ONCE(link->status, DL_STATE_DORMANT);
1374 	}
1375 
1376 	list_del_init(&dev->links.defer_sync);
1377 	__device_links_no_driver(dev);
1378 
1379 	device_links_write_unlock();
1380 }
1381 
1382 /**
1383  * device_links_busy - Check if there are any busy links to consumers.
1384  * @dev: Device to check.
1385  *
1386  * Check each consumer of the device and return 'true' if its link's status
1387  * is one of "consumer probe" or "active" (meaning that the given consumer is
1388  * probing right now or its driver is present).  Otherwise, change the link
1389  * state to "supplier unbind" to prevent the consumer from being probed
1390  * successfully going forward.
1391  *
1392  * Return 'false' if there are no probing or active consumers.
1393  *
1394  * Links without the DL_FLAG_MANAGED flag set are ignored.
1395  */
device_links_busy(struct device * dev)1396 bool device_links_busy(struct device *dev)
1397 {
1398 	struct device_link *link;
1399 	bool ret = false;
1400 
1401 	device_links_write_lock();
1402 
1403 	list_for_each_entry(link, &dev->links.consumers, s_node) {
1404 		if (!(link->flags & DL_FLAG_MANAGED))
1405 			continue;
1406 
1407 		if (link->status == DL_STATE_CONSUMER_PROBE
1408 		    || link->status == DL_STATE_ACTIVE) {
1409 			ret = true;
1410 			break;
1411 		}
1412 		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
1413 	}
1414 
1415 	dev->links.status = DL_DEV_UNBINDING;
1416 
1417 	device_links_write_unlock();
1418 	return ret;
1419 }
1420 
1421 /**
1422  * device_links_unbind_consumers - Force unbind consumers of the given device.
1423  * @dev: Device to unbind the consumers of.
1424  *
1425  * Walk the list of links to consumers for @dev and if any of them is in the
1426  * "consumer probe" state, wait for all device probes in progress to complete
1427  * and start over.
1428  *
1429  * If that's not the case, change the status of the link to "supplier unbind"
1430  * and check if the link was in the "active" state.  If so, force the consumer
1431  * driver to unbind and start over (the consumer will not re-probe as we have
1432  * changed the state of the link already).
1433  *
1434  * Links without the DL_FLAG_MANAGED flag set are ignored.
1435  */
device_links_unbind_consumers(struct device * dev)1436 void device_links_unbind_consumers(struct device *dev)
1437 {
1438 	struct device_link *link;
1439 
1440  start:
1441 	device_links_write_lock();
1442 
1443 	list_for_each_entry(link, &dev->links.consumers, s_node) {
1444 		enum device_link_state status;
1445 
1446 		if (!(link->flags & DL_FLAG_MANAGED) ||
1447 		    link->flags & DL_FLAG_SYNC_STATE_ONLY)
1448 			continue;
1449 
1450 		status = link->status;
1451 		if (status == DL_STATE_CONSUMER_PROBE) {
1452 			device_links_write_unlock();
1453 
1454 			wait_for_device_probe();
1455 			goto start;
1456 		}
1457 		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
1458 		if (status == DL_STATE_ACTIVE) {
1459 			struct device *consumer = link->consumer;
1460 
1461 			get_device(consumer);
1462 
1463 			device_links_write_unlock();
1464 
1465 			device_release_driver_internal(consumer, NULL,
1466 						       consumer->parent);
1467 			put_device(consumer);
1468 			goto start;
1469 		}
1470 	}
1471 
1472 	device_links_write_unlock();
1473 }
1474 
1475 /**
1476  * device_links_purge - Delete existing links to other devices.
1477  * @dev: Target device.
1478  */
device_links_purge(struct device * dev)1479 static void device_links_purge(struct device *dev)
1480 {
1481 	struct device_link *link, *ln;
1482 
1483 	if (dev->class == &devlink_class)
1484 		return;
1485 
1486 	/*
1487 	 * Delete all of the remaining links from this device to any other
1488 	 * devices (either consumers or suppliers).
1489 	 */
1490 	device_links_write_lock();
1491 
1492 	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
1493 		WARN_ON(link->status == DL_STATE_ACTIVE);
1494 		__device_link_del(&link->kref);
1495 	}
1496 
1497 	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
1498 		WARN_ON(link->status != DL_STATE_DORMANT &&
1499 			link->status != DL_STATE_NONE);
1500 		__device_link_del(&link->kref);
1501 	}
1502 
1503 	device_links_write_unlock();
1504 }
1505 
1506 #define FW_DEVLINK_FLAGS_PERMISSIVE	(DL_FLAG_INFERRED | \
1507 					 DL_FLAG_SYNC_STATE_ONLY)
1508 #define FW_DEVLINK_FLAGS_ON		(DL_FLAG_INFERRED | \
1509 					 DL_FLAG_AUTOPROBE_CONSUMER)
1510 #define FW_DEVLINK_FLAGS_RPM		(FW_DEVLINK_FLAGS_ON | \
1511 					 DL_FLAG_PM_RUNTIME)
1512 
1513 static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
fw_devlink_setup(char * arg)1514 static int __init fw_devlink_setup(char *arg)
1515 {
1516 	if (!arg)
1517 		return -EINVAL;
1518 
1519 	if (strcmp(arg, "off") == 0) {
1520 		fw_devlink_flags = 0;
1521 	} else if (strcmp(arg, "permissive") == 0) {
1522 		fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
1523 	} else if (strcmp(arg, "on") == 0) {
1524 		fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
1525 	} else if (strcmp(arg, "rpm") == 0) {
1526 		fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
1527 	}
1528 	return 0;
1529 }
1530 early_param("fw_devlink", fw_devlink_setup);
1531 
/* Whether fw_devlink enforces dependencies strictly; see fw_devlink_is_strict(). */
static bool fw_devlink_strict = true;

/* Parse the "fw_devlink.strict=" boolean kernel parameter. */
static int __init fw_devlink_strict_setup(char *arg)
{
	return strtobool(arg, &fw_devlink_strict);
}
early_param("fw_devlink.strict", fw_devlink_strict_setup);
1538 
/* Return the device link flags fw_devlink uses for the links it creates. */
u32 fw_devlink_get_flags(void)
{
	return fw_devlink_flags;
}
1543 
/* True when fw_devlink runs in "permissive" mode (SYNC_STATE_ONLY links only). */
static bool fw_devlink_is_permissive(void)
{
	return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
}
1548 
fw_devlink_is_strict(void)1549 bool fw_devlink_is_strict(void)
1550 {
1551 	return fw_devlink_strict && !fw_devlink_is_permissive();
1552 }
1553 
/*
 * Ask the fwnode backend to record supplier links for @fwnode, at most once:
 * FWNODE_FLAG_LINKS_ADDED marks nodes that have already been parsed.
 */
static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
{
	if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
		return;

	/* Set the flag even if add_links failed, so we don't retry forever. */
	fwnode_call_int_op(fwnode, add_links);
	fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
}
1562 
/*
 * Parse supplier links for @fwnode and, recursively, for every available
 * descendant node in its subtree.
 */
static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child = NULL;

	fw_devlink_parse_fwnode(fwnode);

	while ((child = fwnode_get_next_available_child_node(fwnode, child)))
		fw_devlink_parse_fwtree(child);
}
1572 
1573 /**
1574  * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
1575  * @con: Device to check dependencies for.
1576  * @sup: Device to check against.
1577  *
1578  * Check if @sup depends on @con or any device dependent on it (its child or
1579  * its consumer etc).  When such a cyclic dependency is found, convert all
1580  * device links created solely by fw_devlink into SYNC_STATE_ONLY device links.
1581  * This is the equivalent of doing fw_devlink=permissive just between the
1582  * devices in the cycle. We need to do this because, at this point, fw_devlink
1583  * can't tell which of these dependencies is not a real dependency.
1584  *
1585  * Return 1 if a cycle is found. Otherwise, return 0.
1586  */
fw_devlink_relax_cycle(struct device * con,void * sup)1587 int fw_devlink_relax_cycle(struct device *con, void *sup)
1588 {
1589 	struct device_link *link;
1590 	int ret;
1591 
1592 	if (con == sup)
1593 		return 1;
1594 
1595 	ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
1596 	if (ret)
1597 		return ret;
1598 
1599 	list_for_each_entry(link, &con->links.consumers, s_node) {
1600 		if ((link->flags & ~DL_FLAG_INFERRED) ==
1601 		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
1602 			continue;
1603 
1604 		if (!fw_devlink_relax_cycle(link->consumer, sup))
1605 			continue;
1606 
1607 		ret = 1;
1608 
1609 		if (!(link->flags & DL_FLAG_INFERRED))
1610 			continue;
1611 
1612 		pm_runtime_drop_link(link);
1613 		link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
1614 		dev_dbg(link->consumer, "Relaxing link with %s\n",
1615 			dev_name(link->supplier));
1616 	}
1617 	return ret;
1618 }
1619 
1620 /**
1621  * fw_devlink_create_devlink - Create a device link from a consumer to fwnode
1622  * @con - Consumer device for the device link
1623  * @sup_handle - fwnode handle of supplier
1624  *
1625  * This function will try to create a device link between the consumer device
1626  * @con and the supplier device represented by @sup_handle.
1627  *
1628  * The supplier has to be provided as a fwnode because incorrect cycles in
1629  * fwnode links can sometimes cause the supplier device to never be created.
1630  * This function detects such cases and returns an error if it cannot create a
1631  * device link from the consumer to a missing supplier.
1632  *
1633  * Returns,
1634  * 0 on successfully creating a device link
1635  * -EINVAL if the device link cannot be created as expected
1636  * -EAGAIN if the device link cannot be created right now, but it may be
1637  *  possible to do that in the future
1638  */
fw_devlink_create_devlink(struct device * con,struct fwnode_handle * sup_handle,u32 flags)1639 static int fw_devlink_create_devlink(struct device *con,
1640 				     struct fwnode_handle *sup_handle, u32 flags)
1641 {
1642 	struct device *sup_dev;
1643 	int ret = 0;
1644 
1645 	sup_dev = get_dev_from_fwnode(sup_handle);
1646 	if (sup_dev) {
1647 		/*
1648 		 * If it's one of those drivers that don't actually bind to
1649 		 * their device using driver core, then don't wait on this
1650 		 * supplier device indefinitely.
1651 		 */
1652 		if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
1653 		    sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
1654 			ret = -EINVAL;
1655 			goto out;
1656 		}
1657 
1658 		/*
1659 		 * If this fails, it is due to cycles in device links.  Just
1660 		 * give up on this link and treat it as invalid.
1661 		 */
1662 		if (!device_link_add(con, sup_dev, flags) &&
1663 		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
1664 			dev_info(con, "Fixing up cyclic dependency with %s\n",
1665 				 dev_name(sup_dev));
1666 			device_links_write_lock();
1667 			fw_devlink_relax_cycle(con, sup_dev);
1668 			device_links_write_unlock();
1669 			device_link_add(con, sup_dev,
1670 					FW_DEVLINK_FLAGS_PERMISSIVE);
1671 			ret = -EINVAL;
1672 		}
1673 
1674 		goto out;
1675 	}
1676 
1677 	/* Supplier that's already initialized without a struct device. */
1678 	if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
1679 		return -EINVAL;
1680 
1681 	/*
1682 	 * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports
1683 	 * cycles. So cycle detection isn't necessary and shouldn't be
1684 	 * done.
1685 	 */
1686 	if (flags & DL_FLAG_SYNC_STATE_ONLY)
1687 		return -EAGAIN;
1688 
1689 	/*
1690 	 * If we can't find the supplier device from its fwnode, it might be
1691 	 * due to a cyclic dependency between fwnodes. Some of these cycles can
1692 	 * be broken by applying logic. Check for these types of cycles and
1693 	 * break them so that devices in the cycle probe properly.
1694 	 *
1695 	 * If the supplier's parent is dependent on the consumer, then the
1696 	 * consumer and supplier have a cyclic dependency. Since fw_devlink
1697 	 * can't tell which of the inferred dependencies are incorrect, don't
1698 	 * enforce probe ordering between any of the devices in this cyclic
1699 	 * dependency. Do this by relaxing all the fw_devlink device links in
1700 	 * this cycle and by treating the fwnode link between the consumer and
1701 	 * the supplier as an invalid dependency.
1702 	 */
1703 	sup_dev = fwnode_get_next_parent_dev(sup_handle);
1704 	if (sup_dev && device_is_dependent(con, sup_dev)) {
1705 		dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
1706 			 sup_handle, dev_name(sup_dev));
1707 		device_links_write_lock();
1708 		fw_devlink_relax_cycle(con, sup_dev);
1709 		device_links_write_unlock();
1710 		ret = -EINVAL;
1711 	} else {
1712 		/*
1713 		 * Can't check for cycles or no cycles. So let's try
1714 		 * again later.
1715 		 */
1716 		ret = -EAGAIN;
1717 	}
1718 
1719 out:
1720 	put_device(sup_dev);
1721 	return ret;
1722 }
1723 
1724 /**
1725  * __fw_devlink_link_to_consumers - Create device links to consumers of a device
1726  * @dev - Device that needs to be linked to its consumers
1727  *
1728  * This function looks at all the consumer fwnodes of @dev and creates device
1729  * links between the consumer device and @dev (supplier).
1730  *
1731  * If the consumer device has not been added yet, then this function creates a
1732  * SYNC_STATE_ONLY link between @dev (supplier) and the closest ancestor device
1733  * of the consumer fwnode. This is necessary to make sure @dev doesn't get a
1734  * sync_state() callback before the real consumer device gets to be added and
1735  * then probed.
1736  *
1737  * Once device links are created from the real consumer to @dev (supplier), the
1738  * fwnode links are deleted.
1739  */
__fw_devlink_link_to_consumers(struct device * dev)1740 static void __fw_devlink_link_to_consumers(struct device *dev)
1741 {
1742 	struct fwnode_handle *fwnode = dev->fwnode;
1743 	struct fwnode_link *link, *tmp;
1744 
1745 	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
1746 		u32 dl_flags = fw_devlink_get_flags();
1747 		struct device *con_dev;
1748 		bool own_link = true;
1749 		int ret;
1750 
1751 		con_dev = get_dev_from_fwnode(link->consumer);
1752 		/*
1753 		 * If consumer device is not available yet, make a "proxy"
1754 		 * SYNC_STATE_ONLY link from the consumer's parent device to
1755 		 * the supplier device. This is necessary to make sure the
1756 		 * supplier doesn't get a sync_state() callback before the real
1757 		 * consumer can create a device link to the supplier.
1758 		 *
1759 		 * This proxy link step is needed to handle the case where the
1760 		 * consumer's parent device is added before the supplier.
1761 		 */
1762 		if (!con_dev) {
1763 			con_dev = fwnode_get_next_parent_dev(link->consumer);
1764 			/*
1765 			 * However, if the consumer's parent device is also the
1766 			 * parent of the supplier, don't create a
1767 			 * consumer-supplier link from the parent to its child
1768 			 * device. Such a dependency is impossible.
1769 			 */
1770 			if (con_dev &&
1771 			    fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) {
1772 				put_device(con_dev);
1773 				con_dev = NULL;
1774 			} else {
1775 				own_link = false;
1776 				dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
1777 			}
1778 		}
1779 
1780 		if (!con_dev)
1781 			continue;
1782 
1783 		ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags);
1784 		put_device(con_dev);
1785 		if (!own_link || ret == -EAGAIN)
1786 			continue;
1787 
1788 		list_del(&link->s_hook);
1789 		list_del(&link->c_hook);
1790 		kfree(link);
1791 	}
1792 }
1793 
1794 /**
1795  * __fw_devlink_link_to_suppliers - Create device links to suppliers of a device
1796  * @dev - The consumer device that needs to be linked to its suppliers
1797  * @fwnode - Root of the fwnode tree that is used to create device links
1798  *
1799  * This function looks at all the supplier fwnodes of fwnode tree rooted at
1800  * @fwnode and creates device links between @dev (consumer) and all the
1801  * supplier devices of the entire fwnode tree at @fwnode.
1802  *
1803  * The function creates normal (non-SYNC_STATE_ONLY) device links between @dev
1804  * and the real suppliers of @dev. Once these device links are created, the
1805  * fwnode links are deleted. When such device links are successfully created,
1806  * this function is called recursively on those supplier devices. This is
1807  * needed to detect and break some invalid cycles in fwnode links.  See
1808  * fw_devlink_create_devlink() for more details.
1809  *
1810  * In addition, it also looks at all the suppliers of the entire fwnode tree
1811  * because some of the child devices of @dev that have not been added yet
1812  * (because @dev hasn't probed) might already have their suppliers added to
1813  * driver core. So, this function creates SYNC_STATE_ONLY device links between
1814  * @dev (consumer) and these suppliers to make sure they don't execute their
1815  * sync_state() callbacks before these child devices have a chance to create
1816  * their device links. The fwnode links that correspond to the child devices
1817  * aren't delete because they are needed later to create the device links
1818  * between the real consumer and supplier devices.
1819  */
__fw_devlink_link_to_suppliers(struct device * dev,struct fwnode_handle * fwnode)1820 static void __fw_devlink_link_to_suppliers(struct device *dev,
1821 					   struct fwnode_handle *fwnode)
1822 {
1823 	bool own_link = (dev->fwnode == fwnode);
1824 	struct fwnode_link *link, *tmp;
1825 	struct fwnode_handle *child = NULL;
1826 	u32 dl_flags;
1827 
1828 	if (own_link)
1829 		dl_flags = fw_devlink_get_flags();
1830 	else
1831 		dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
1832 
1833 	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
1834 		int ret;
1835 		struct device *sup_dev;
1836 		struct fwnode_handle *sup = link->supplier;
1837 
1838 		ret = fw_devlink_create_devlink(dev, sup, dl_flags);
1839 		if (!own_link || ret == -EAGAIN)
1840 			continue;
1841 
1842 		list_del(&link->s_hook);
1843 		list_del(&link->c_hook);
1844 		kfree(link);
1845 
1846 		/* If no device link was created, nothing more to do. */
1847 		if (ret)
1848 			continue;
1849 
1850 		/*
1851 		 * If a device link was successfully created to a supplier, we
1852 		 * now need to try and link the supplier to all its suppliers.
1853 		 *
1854 		 * This is needed to detect and delete false dependencies in
1855 		 * fwnode links that haven't been converted to a device link
1856 		 * yet. See comments in fw_devlink_create_devlink() for more
1857 		 * details on the false dependency.
1858 		 *
1859 		 * Without deleting these false dependencies, some devices will
1860 		 * never probe because they'll keep waiting for their false
1861 		 * dependency fwnode links to be converted to device links.
1862 		 */
1863 		sup_dev = get_dev_from_fwnode(sup);
1864 		__fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode);
1865 		put_device(sup_dev);
1866 	}
1867 
1868 	/*
1869 	 * Make "proxy" SYNC_STATE_ONLY device links to represent the needs of
1870 	 * all the descendants. This proxy link step is needed to handle the
1871 	 * case where the supplier is added before the consumer's parent device
1872 	 * (@dev).
1873 	 */
1874 	while ((child = fwnode_get_next_available_child_node(fwnode, child)))
1875 		__fw_devlink_link_to_suppliers(dev, child);
1876 }
1877 
fw_devlink_link_device(struct device * dev)1878 static void fw_devlink_link_device(struct device *dev)
1879 {
1880 	struct fwnode_handle *fwnode = dev->fwnode;
1881 
1882 	if (!fw_devlink_flags)
1883 		return;
1884 
1885 	fw_devlink_parse_fwtree(fwnode);
1886 
1887 	mutex_lock(&fwnode_link_lock);
1888 	__fw_devlink_link_to_consumers(dev);
1889 	__fw_devlink_link_to_suppliers(dev, fwnode);
1890 	mutex_unlock(&fwnode_link_lock);
1891 }
1892 
/* Device links support end. */

/*
 * Optional legacy platform hooks, invoked from device_platform_notify()
 * on device addition/removal when set by platform code.
 */
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
/* Kobjects backing the "dev" sysfs hierarchy — presumably /sys/dev and its
 * char/block children; confirm against the init code. */
static struct kobject *dev_kobj;
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;

/* Serializes device hotplug operations (see device_online()/offline()). */
static DEFINE_MUTEX(device_hotplug_lock);
1902 
/* Acquire the global device hotplug lock (may sleep). */
void lock_device_hotplug(void)
{
	mutex_lock(&device_hotplug_lock);
}

/* Release the global device hotplug lock. */
void unlock_device_hotplug(void)
{
	mutex_unlock(&device_hotplug_lock);
}

/*
 * Try to acquire the hotplug lock from a sysfs handler.  On contention,
 * back off briefly and restart the syscall rather than blocking —
 * NOTE(review): presumably so the writer stays responsive to signals and
 * attribute removal; confirm against callers of this helper.
 */
int lock_device_hotplug_sysfs(void)
{
	if (mutex_trylock(&device_hotplug_lock))
		return 0;

	/* Avoid busy looping (5 ms of sleep should do). */
	msleep(5);
	return restart_syscall();
}
1922 
#ifdef CONFIG_BLOCK
/* Partitions are devices too, but are excluded from some core handling. */
static inline int device_is_not_partition(struct device *dev)
{
	return dev->type != &part_type;
}
#else
/* Without CONFIG_BLOCK there are no partition devices at all. */
static inline int device_is_not_partition(struct device *dev)
{
	return 1;
}
#endif
1934 
1935 static int
device_platform_notify(struct device * dev,enum kobject_action action)1936 device_platform_notify(struct device *dev, enum kobject_action action)
1937 {
1938 	int ret;
1939 
1940 	ret = acpi_platform_notify(dev, action);
1941 	if (ret)
1942 		return ret;
1943 
1944 	ret = software_node_notify(dev, action);
1945 	if (ret)
1946 		return ret;
1947 
1948 	if (platform_notify && action == KOBJ_ADD)
1949 		platform_notify(dev);
1950 	else if (platform_notify_remove && action == KOBJ_REMOVE)
1951 		platform_notify_remove(dev);
1952 	return 0;
1953 }
1954 
1955 /**
1956  * dev_driver_string - Return a device's driver name, if at all possible
1957  * @dev: struct device to get the name of
1958  *
1959  * Will return the device's driver's name if it is bound to a device.  If
1960  * the device is not bound to a driver, it will return the name of the bus
1961  * it is attached to.  If it is not attached to a bus either, an empty
1962  * string will be returned.
1963  */
dev_driver_string(const struct device * dev)1964 const char *dev_driver_string(const struct device *dev)
1965 {
1966 	struct device_driver *drv;
1967 
1968 	/* dev->driver can change to NULL underneath us because of unbinding,
1969 	 * so be careful about accessing it.  dev->bus and dev->class should
1970 	 * never change once they are set, so they don't need special care.
1971 	 */
1972 	drv = READ_ONCE(dev->driver);
1973 	return drv ? drv->name : dev_bus_name(dev);
1974 }
1975 EXPORT_SYMBOL(dev_driver_string);
1976 
1977 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
1978 
dev_attr_show(struct kobject * kobj,struct attribute * attr,char * buf)1979 static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
1980 			     char *buf)
1981 {
1982 	struct device_attribute *dev_attr = to_dev_attr(attr);
1983 	struct device *dev = kobj_to_dev(kobj);
1984 	ssize_t ret = -EIO;
1985 
1986 	if (dev_attr->show)
1987 		ret = dev_attr->show(dev, dev_attr, buf);
1988 	if (ret >= (ssize_t)PAGE_SIZE) {
1989 		printk("dev_attr_show: %pS returned bad count\n",
1990 				dev_attr->show);
1991 	}
1992 	return ret;
1993 }
1994 
dev_attr_store(struct kobject * kobj,struct attribute * attr,const char * buf,size_t count)1995 static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
1996 			      const char *buf, size_t count)
1997 {
1998 	struct device_attribute *dev_attr = to_dev_attr(attr);
1999 	struct device *dev = kobj_to_dev(kobj);
2000 	ssize_t ret = -EIO;
2001 
2002 	if (dev_attr->store)
2003 		ret = dev_attr->store(dev, dev_attr, buf, count);
2004 	return ret;
2005 }
2006 
/* sysfs show/store operations shared by all struct device attributes. */
static const struct sysfs_ops dev_sysfs_ops = {
	.show	= dev_attr_show,
	.store	= dev_attr_store,
};

/* Map a device_attribute back to its containing dev_ext_attribute. */
#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
2013 
device_store_ulong(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)2014 ssize_t device_store_ulong(struct device *dev,
2015 			   struct device_attribute *attr,
2016 			   const char *buf, size_t size)
2017 {
2018 	struct dev_ext_attribute *ea = to_ext_attr(attr);
2019 	int ret;
2020 	unsigned long new;
2021 
2022 	ret = kstrtoul(buf, 0, &new);
2023 	if (ret)
2024 		return ret;
2025 	*(unsigned long *)(ea->var) = new;
2026 	/* Always return full write size even if we didn't consume all */
2027 	return size;
2028 }
2029 EXPORT_SYMBOL_GPL(device_store_ulong);
2030 
device_show_ulong(struct device * dev,struct device_attribute * attr,char * buf)2031 ssize_t device_show_ulong(struct device *dev,
2032 			  struct device_attribute *attr,
2033 			  char *buf)
2034 {
2035 	struct dev_ext_attribute *ea = to_ext_attr(attr);
2036 	return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
2037 }
2038 EXPORT_SYMBOL_GPL(device_show_ulong);
2039 
device_store_int(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)2040 ssize_t device_store_int(struct device *dev,
2041 			 struct device_attribute *attr,
2042 			 const char *buf, size_t size)
2043 {
2044 	struct dev_ext_attribute *ea = to_ext_attr(attr);
2045 	int ret;
2046 	long new;
2047 
2048 	ret = kstrtol(buf, 0, &new);
2049 	if (ret)
2050 		return ret;
2051 
2052 	if (new > INT_MAX || new < INT_MIN)
2053 		return -EINVAL;
2054 	*(int *)(ea->var) = new;
2055 	/* Always return full write size even if we didn't consume all */
2056 	return size;
2057 }
2058 EXPORT_SYMBOL_GPL(device_store_int);
2059 
device_show_int(struct device * dev,struct device_attribute * attr,char * buf)2060 ssize_t device_show_int(struct device *dev,
2061 			struct device_attribute *attr,
2062 			char *buf)
2063 {
2064 	struct dev_ext_attribute *ea = to_ext_attr(attr);
2065 
2066 	return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
2067 }
2068 EXPORT_SYMBOL_GPL(device_show_int);
2069 
device_store_bool(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)2070 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
2071 			  const char *buf, size_t size)
2072 {
2073 	struct dev_ext_attribute *ea = to_ext_attr(attr);
2074 
2075 	if (strtobool(buf, ea->var) < 0)
2076 		return -EINVAL;
2077 
2078 	return size;
2079 }
2080 EXPORT_SYMBOL_GPL(device_store_bool);
2081 
device_show_bool(struct device * dev,struct device_attribute * attr,char * buf)2082 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
2083 			 char *buf)
2084 {
2085 	struct dev_ext_attribute *ea = to_ext_attr(attr);
2086 
2087 	return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
2088 }
2089 EXPORT_SYMBOL_GPL(device_show_bool);
2090 
/**
 * device_release - free device structure.
 * @kobj: device's kobject.
 *
 * This is called once the reference count for the object
 * reaches 0. We forward the call to the device's release
 * method, which should handle actually freeing the structure.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * Some platform devices are driven without driver attached
	 * and managed resources may have been acquired.  Make sure
	 * all resources are released.
	 *
	 * Drivers still can add resources into device after device
	 * is deleted but alive, so release devres here to avoid
	 * possible memory leak.
	 */
	devres_release_all(dev);

	kfree(dev->dma_range_map);

	/* Release callback precedence: device's own, then type's, then class'. */
	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
			dev_name(dev));
	kfree(p);
}
2128 
device_namespace(struct kobject * kobj)2129 static const void *device_namespace(struct kobject *kobj)
2130 {
2131 	struct device *dev = kobj_to_dev(kobj);
2132 	const void *ns = NULL;
2133 
2134 	if (dev->class && dev->class->ns_type)
2135 		ns = dev->class->namespace(dev);
2136 
2137 	return ns;
2138 }
2139 
/* Delegate sysfs uid/gid ownership queries to the device's class, if any. */
static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->class && dev->class->get_ownership)
		dev->class->get_ownership(dev, uid, gid);
}

/* kobject type backing every struct device's embedded kobject. */
static struct kobj_type device_ktype = {
	.release	= device_release,
	.sysfs_ops	= &dev_sysfs_ops,
	.namespace	= device_namespace,
	.get_ownership	= device_get_ownership,
};
2154 
2155 
dev_uevent_filter(struct kset * kset,struct kobject * kobj)2156 static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
2157 {
2158 	struct kobj_type *ktype = get_ktype(kobj);
2159 
2160 	if (ktype == &device_ktype) {
2161 		struct device *dev = kobj_to_dev(kobj);
2162 		if (dev->bus)
2163 			return 1;
2164 		if (dev->class)
2165 			return 1;
2166 	}
2167 	return 0;
2168 }
2169 
dev_uevent_name(struct kset * kset,struct kobject * kobj)2170 static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
2171 {
2172 	struct device *dev = kobj_to_dev(kobj);
2173 
2174 	if (dev->bus)
2175 		return dev->bus->name;
2176 	if (dev->class)
2177 		return dev->class->name;
2178 	return NULL;
2179 }
2180 
/*
 * Populate @env with uevent variables for @dev: device-node properties
 * (MAJOR/MINOR, DEVNAME, DEVMODE, DEVUID, DEVGID), DEVTYPE, DRIVER and DT
 * data, followed by whatever the bus, class and device-type callbacks add.
 * Returns the status of the last callback that ran (callback errors are
 * logged but do not stop the remaining callbacks).
 */
static int dev_uevent(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* add device node properties if present */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			/* Only emit non-default mode and ownership. */
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			/* tmp was allocated by device_get_devnode(). */
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);

	/* Add common DT information about the device */
	of_device_uevent(dev, env);

	/* have the bus specific function add its stuff */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* have the class specific function add its stuff */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* have the device type specific function add its stuff */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}
2247 
/* uevent operations for the devices kset (see devices_kset below). */
static const struct kset_uevent_ops device_uevent_ops = {
	.filter =	dev_uevent_filter,
	.name =		dev_uevent_name,
	.uevent =	dev_uevent,
};
2253 
/*
 * "uevent" attribute read: rebuild the uevent environment for @dev by
 * finding the owning kset (walking up parents) and invoking its uevent
 * callback, then emit one KEY=value pair per line.  Returns 0 bytes (not
 * an error) when the device has no kset, is filtered out, or the uevent
 * callback fails; -ENOMEM only when the env buffer cannot be allocated.
 */
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	int len = 0;
	int retval;

	/* search the kset, the device belongs to */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* respect filter */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(kset, &dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* let the kset specific function add its keys */
	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
	if (retval)
		goto out;

	/* copy keys to file */
	for (i = 0; i < env->envp_idx; i++)
		len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
	/* kfree(NULL) is a no-op on the early-exit paths. */
	kfree(env);
	return len;
}
2296 
uevent_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2297 static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
2298 			    const char *buf, size_t count)
2299 {
2300 	int rc;
2301 
2302 	rc = kobject_synth_uevent(&dev->kobj, buf, count);
2303 
2304 	if (rc) {
2305 		dev_err(dev, "uevent: failed to send synthetic uevent\n");
2306 		return rc;
2307 	}
2308 
2309 	return count;
2310 }
2311 static DEVICE_ATTR_RW(uevent);
2312 
online_show(struct device * dev,struct device_attribute * attr,char * buf)2313 static ssize_t online_show(struct device *dev, struct device_attribute *attr,
2314 			   char *buf)
2315 {
2316 	bool val;
2317 
2318 	device_lock(dev);
2319 	val = !dev->offline;
2320 	device_unlock(dev);
2321 	return sysfs_emit(buf, "%u\n", val);
2322 }
2323 
online_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2324 static ssize_t online_store(struct device *dev, struct device_attribute *attr,
2325 			    const char *buf, size_t count)
2326 {
2327 	bool val;
2328 	int ret;
2329 
2330 	ret = strtobool(buf, &val);
2331 	if (ret < 0)
2332 		return ret;
2333 
2334 	ret = lock_device_hotplug_sysfs();
2335 	if (ret)
2336 		return ret;
2337 
2338 	ret = val ? device_online(dev) : device_offline(dev);
2339 	unlock_device_hotplug();
2340 	return ret < 0 ? ret : count;
2341 }
2342 static DEVICE_ATTR_RW(online);
2343 
/* Create all sysfs groups in the NULL-terminated @groups list for @dev. */
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);

/* Remove the sysfs groups previously created by device_add_groups(). */
void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);

/*
 * devres payload for managed attribute groups: a single group or a
 * NULL-terminated list, depending on which devm_ helper allocated it.
 */
union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};

/* devres match callback: true when @res holds the group pointer @data. */
static int devm_attr_group_match(struct device *dev, void *res, void *data)
{
	return ((union device_attr_group_devres *)res)->group == data;
}
2366 
devm_attr_group_remove(struct device * dev,void * res)2367 static void devm_attr_group_remove(struct device *dev, void *res)
2368 {
2369 	union device_attr_group_devres *devres = res;
2370 	const struct attribute_group *group = devres->group;
2371 
2372 	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
2373 	sysfs_remove_group(&dev->kobj, group);
2374 }
2375 
devm_attr_groups_remove(struct device * dev,void * res)2376 static void devm_attr_groups_remove(struct device *dev, void *res)
2377 {
2378 	union device_attr_group_devres *devres = res;
2379 	const struct attribute_group **groups = devres->groups;
2380 
2381 	dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
2382 	sysfs_remove_groups(&dev->kobj, groups);
2383 }
2384 
2385 /**
2386  * devm_device_add_group - given a device, create a managed attribute group
2387  * @dev:	The device to create the group for
2388  * @grp:	The attribute group to create
2389  *
2390  * This function creates a group for the first time.  It will explicitly
2391  * warn and error if any of the attribute files being created already exist.
2392  *
2393  * Returns 0 on success or error code on failure.
2394  */
devm_device_add_group(struct device * dev,const struct attribute_group * grp)2395 int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
2396 {
2397 	union device_attr_group_devres *devres;
2398 	int error;
2399 
2400 	devres = devres_alloc(devm_attr_group_remove,
2401 			      sizeof(*devres), GFP_KERNEL);
2402 	if (!devres)
2403 		return -ENOMEM;
2404 
2405 	error = sysfs_create_group(&dev->kobj, grp);
2406 	if (error) {
2407 		devres_free(devres);
2408 		return error;
2409 	}
2410 
2411 	devres->group = grp;
2412 	devres_add(dev, devres);
2413 	return 0;
2414 }
2415 EXPORT_SYMBOL_GPL(devm_device_add_group);
2416 
/**
 * devm_device_remove_group: remove a managed group from a device
 * @dev:	device to remove the group from
 * @grp:	group to remove
 *
 * This function removes a group of attributes from a device. The attributes
 * previously have to have been created for this group, otherwise it will fail.
 */
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp)
{
	/* devres_release() fails (and warns) if @grp was never devm-added. */
	WARN_ON(devres_release(dev, devm_attr_group_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)grp));
}
EXPORT_SYMBOL_GPL(devm_device_remove_group);
2433 
2434 /**
2435  * devm_device_add_groups - create a bunch of managed attribute groups
2436  * @dev:	The device to create the group for
2437  * @groups:	The attribute groups to create, NULL terminated
2438  *
2439  * This function creates a bunch of managed attribute groups.  If an error
2440  * occurs when creating a group, all previously created groups will be
2441  * removed, unwinding everything back to the original state when this
2442  * function was called.  It will explicitly warn and error if any of the
2443  * attribute files being created already exist.
2444  *
2445  * Returns 0 on success or error code from sysfs_create_group on failure.
2446  */
devm_device_add_groups(struct device * dev,const struct attribute_group ** groups)2447 int devm_device_add_groups(struct device *dev,
2448 			   const struct attribute_group **groups)
2449 {
2450 	union device_attr_group_devres *devres;
2451 	int error;
2452 
2453 	devres = devres_alloc(devm_attr_groups_remove,
2454 			      sizeof(*devres), GFP_KERNEL);
2455 	if (!devres)
2456 		return -ENOMEM;
2457 
2458 	error = sysfs_create_groups(&dev->kobj, groups);
2459 	if (error) {
2460 		devres_free(devres);
2461 		return error;
2462 	}
2463 
2464 	devres->groups = groups;
2465 	devres_add(dev, devres);
2466 	return 0;
2467 }
2468 EXPORT_SYMBOL_GPL(devm_device_add_groups);
2469 
/**
 * devm_device_remove_groups - remove a list of managed groups
 *
 * @dev:	The device for the groups to be removed from
 * @groups:	NULL terminated list of groups to be removed
 *
 * If groups is not NULL, remove the specified groups from the device.
 */
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups)
{
	/* devres_release() fails (and warns) if @groups was never devm-added. */
	WARN_ON(devres_release(dev, devm_attr_groups_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)groups));
}
EXPORT_SYMBOL_GPL(devm_device_remove_groups);
2486 
/*
 * Create all sysfs attributes for a device being added: class groups,
 * device-type groups, device-specific groups, plus the "online" and
 * "waiting_for_supplier" files when applicable.  On failure, everything
 * created so far is removed in reverse order.
 */
static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	/* "online" is only exposed for devices that can be offlined. */
	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	/* Supplier-wait state is exposed when fw_devlink enforces ordering. */
	if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) {
		error = device_create_file(dev, &dev_attr_waiting_for_supplier);
		if (error)
			goto err_remove_dev_online;
	}

	return 0;

 err_remove_dev_online:
	device_remove_file(dev, &dev_attr_online);
 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
 err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}
2536 
device_remove_attrs(struct device * dev)2537 static void device_remove_attrs(struct device *dev)
2538 {
2539 	struct class *class = dev->class;
2540 	const struct device_type *type = dev->type;
2541 
2542 	device_remove_file(dev, &dev_attr_waiting_for_supplier);
2543 	device_remove_file(dev, &dev_attr_online);
2544 	device_remove_groups(dev, dev->groups);
2545 
2546 	if (type)
2547 		device_remove_groups(dev, type->groups);
2548 
2549 	if (class)
2550 		device_remove_groups(dev, class->dev_groups);
2551 }
2552 
/* "dev" attribute read: print the device number (see print_dev_t()). */
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);

/* /sys/devices/ */
struct kset *devices_kset;
2562 
/**
 * devices_kset_move_before - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come before.
 */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	/* Nothing to reorder before the kset exists (early boot). */
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	/* list_move_tail() relative to devb's entry == insert before devb. */
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_after - Move device in the devices_kset's list.
 * @deva: Device to move
 * @devb: Device @deva should come after.
 */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	/* list_move() relative to devb's entry == insert right after devb. */
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_last - move the device to the end of devices_kset's list.
 * @dev: device to move
 */
void devices_kset_move_last(struct device *dev)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}
2608 
2609 /**
2610  * device_create_file - create sysfs attribute file for device.
2611  * @dev: device.
2612  * @attr: device attribute descriptor.
2613  */
device_create_file(struct device * dev,const struct device_attribute * attr)2614 int device_create_file(struct device *dev,
2615 		       const struct device_attribute *attr)
2616 {
2617 	int error = 0;
2618 
2619 	if (dev) {
2620 		WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
2621 			"Attribute %s: write permission without 'store'\n",
2622 			attr->attr.name);
2623 		WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
2624 			"Attribute %s: read permission without 'show'\n",
2625 			attr->attr.name);
2626 		error = sysfs_create_file(&dev->kobj, &attr->attr);
2627 	}
2628 
2629 	return error;
2630 }
2631 EXPORT_SYMBOL_GPL(device_create_file);
2632 
/**
 * device_remove_file - remove sysfs attribute file.
 * @dev: device.
 * @attr: device attribute descriptor.
 *
 * A NULL @dev is silently ignored.
 */
void device_remove_file(struct device *dev,
			const struct device_attribute *attr)
{
	if (dev)
		sysfs_remove_file(&dev->kobj, &attr->attr);
}
EXPORT_SYMBOL_GPL(device_remove_file);
2645 
2646 /**
2647  * device_remove_file_self - remove sysfs attribute file from its own method.
2648  * @dev: device.
2649  * @attr: device attribute descriptor.
2650  *
2651  * See kernfs_remove_self() for details.
2652  */
device_remove_file_self(struct device * dev,const struct device_attribute * attr)2653 bool device_remove_file_self(struct device *dev,
2654 			     const struct device_attribute *attr)
2655 {
2656 	if (dev)
2657 		return sysfs_remove_file_self(&dev->kobj, &attr->attr);
2658 	else
2659 		return false;
2660 }
2661 EXPORT_SYMBOL_GPL(device_remove_file_self);
2662 
2663 /**
2664  * device_create_bin_file - create sysfs binary attribute file for device.
2665  * @dev: device.
2666  * @attr: device binary attribute descriptor.
2667  */
device_create_bin_file(struct device * dev,const struct bin_attribute * attr)2668 int device_create_bin_file(struct device *dev,
2669 			   const struct bin_attribute *attr)
2670 {
2671 	int error = -EINVAL;
2672 	if (dev)
2673 		error = sysfs_create_bin_file(&dev->kobj, attr);
2674 	return error;
2675 }
2676 EXPORT_SYMBOL_GPL(device_create_bin_file);
2677 
/**
 * device_remove_bin_file - remove sysfs binary attribute file
 * @dev: device.
 * @attr: device binary attribute descriptor.
 *
 * A NULL @dev is silently ignored.
 */
void device_remove_bin_file(struct device *dev,
			    const struct bin_attribute *attr)
{
	if (dev)
		sysfs_remove_bin_file(&dev->kobj, attr);
}
EXPORT_SYMBOL_GPL(device_remove_bin_file);
2690 
klist_children_get(struct klist_node * n)2691 static void klist_children_get(struct klist_node *n)
2692 {
2693 	struct device_private *p = to_device_private_parent(n);
2694 	struct device *dev = p->device;
2695 
2696 	get_device(dev);
2697 }
2698 
klist_children_put(struct klist_node * n)2699 static void klist_children_put(struct klist_node *n)
2700 {
2701 	struct device_private *p = to_device_private_parent(n);
2702 	struct device *dev = p->device;
2703 
2704 	put_device(dev);
2705 }
2706 
/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * This prepares the device for use by other layers by initializing
 * its fields.
 * It is the first half of device_register(), if called by
 * that function, though it can also be called separately, so one
 * may use @dev's fields. In particular, get_device()/put_device()
 * may be used for reference counting of @dev after calling this
 * function.
 *
 * All fields in @dev must be initialized by the caller to 0, except
 * for those explicitly set to some other value.  The simplest
 * approach is to use kzalloc() to allocate the structure containing
 * @dev.
 *
 * NOTE: Use put_device() to give up your reference instead of freeing
 * @dev directly once you have called this function.
 */
void device_initialize(struct device *dev)
{
	/* Every device lives in the devices kset (/sys/devices). */
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
#ifdef CONFIG_PROVE_LOCKING
	mutex_init(&dev->lockdep_mutex);
#endif
	/* dev->mutex is deliberately exempted from lockdep validation. */
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	/* No NUMA node until the caller/bus assigns one. */
	set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
	raw_spin_lock_init(&dev->msi_lock);
	INIT_LIST_HEAD(&dev->msi_list);
#endif
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	INIT_LIST_HEAD(&dev->links.defer_sync);
	dev->links.status = DL_DEV_NO_DRIVER;
#ifdef CONFIG_SWIOTLB
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
#endif
}
EXPORT_SYMBOL_GPL(device_initialize);
2754 
/*
 * Lazily create /sys/devices/virtual, the parent directory for class
 * devices that have no real parent.
 *
 * NOTE(review): the lazy initialization of @virtual_dir is not itself
 * serialized here -- presumably callers are serialized at a higher
 * level (get_device_parent() takes gdp_mutex only after calling this);
 * confirm before relying on concurrent first calls.
 */
struct kobject *virtual_device_parent(struct device *dev)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}
2765 
/*
 * A "glue" directory kobject inserted between a class device and its
 * non-class parent to avoid sysfs name collisions; remembers the owning
 * class so namespace lookups can be forwarded to it.
 */
struct class_dir {
	struct kobject kobj;
	struct class *class;	/* class this glue dir belongs to */
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
2772 
/* kobject release callback: free the glue-directory wrapper. */
static void class_dir_release(struct kobject *kobj)
{
	kfree(to_class_dir(kobj));
}
2778 
2779 static const
class_dir_child_ns_type(struct kobject * kobj)2780 struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
2781 {
2782 	struct class_dir *dir = to_class_dir(kobj);
2783 	return dir->class->ns_type;
2784 }
2785 
/* kobject type used for class "glue" directories */
static struct kobj_type class_dir_ktype = {
	.release	= class_dir_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.child_ns_type	= class_dir_child_ns_type
};
2791 
2792 static struct kobject *
class_dir_create_and_add(struct class * class,struct kobject * parent_kobj)2793 class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
2794 {
2795 	struct class_dir *dir;
2796 	int retval;
2797 
2798 	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
2799 	if (!dir)
2800 		return ERR_PTR(-ENOMEM);
2801 
2802 	dir->class = class;
2803 	kobject_init(&dir->kobj, &class_dir_ktype);
2804 
2805 	dir->kobj.kset = &class->p->glue_dirs;
2806 
2807 	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
2808 	if (retval < 0) {
2809 		kobject_put(&dir->kobj);
2810 		return ERR_PTR(retval);
2811 	}
2812 	return &dir->kobj;
2813 }
2814 
/* serializes lookup, creation and removal of class "glue" directories */
static DEFINE_MUTEX(gdp_mutex);
2816 
/*
 * Work out which kobject should be the sysfs parent of @dev: the
 * parent device itself, /sys/devices/virtual, a per-class "glue"
 * directory under the parent, or a bus-provided default root.
 */
static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* block disks show up in /sys/block */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * If we have no parent, we live in "virtual".
		 * Class-devices with a non class-device as parent, live
		 * in a "glue" directory to prevent namespace collisions.
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		/* gdp_mutex serializes glue-dir lookup vs. creation */
		mutex_lock(&gdp_mutex);

		/* find our class-directory at the parent and reference it */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* or create a new class-directory at the parent device */
		k = class_dir_create_and_add(dev->class, parent_kobj);
		/* do not emit an uevent for this simple "glue" directory */
		mutex_unlock(&gdp_mutex);
		return k;
	}

	/* subsystems can specify a default root directory for their devices */
	if (!parent && dev->bus && dev->bus->dev_root)
		return &dev->bus->dev_root->kobj;

	if (parent)
		return &parent->kobj;
	return NULL;
}
2876 
live_in_glue_dir(struct kobject * kobj,struct device * dev)2877 static inline bool live_in_glue_dir(struct kobject *kobj,
2878 				    struct device *dev)
2879 {
2880 	if (!kobj || !dev->class ||
2881 	    kobj->kset != &dev->class->p->glue_dirs)
2882 		return false;
2883 	return true;
2884 }
2885 
/*
 * Return the sysfs parent kobject of @dev; when the device lives in a
 * glue directory (see live_in_glue_dir()), this is that directory.
 */
static inline struct kobject *get_glue_dir(struct device *dev)
{
	return dev->kobj.parent;
}
2890 
/*
 * make sure cleaning up dir as the last step, we need to make
 * sure .release handler of kobject is run with holding the
 * global lock
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	unsigned int ref;

	/* see if we live in a "glue" directory */
	if (!live_in_glue_dir(glue_dir, dev))
		return;

	mutex_lock(&gdp_mutex);
	/**
	 * There is a race condition between removing glue directory
	 * and adding a new device under the glue directory.
	 *
	 * CPU1:                                         CPU2:
	 *
	 * device_add()
	 *   get_device_parent()
	 *     class_dir_create_and_add()
	 *       kobject_add_internal()
	 *         create_dir()    // create glue_dir
	 *
	 *                                               device_add()
	 *                                                 get_device_parent()
	 *                                                   kobject_get() // get glue_dir
	 *
	 * device_del()
	 *   cleanup_glue_dir()
	 *     kobject_del(glue_dir)
	 *
	 *                                               kobject_add()
	 *                                                 kobject_add_internal()
	 *                                                   create_dir() // in glue_dir
	 *                                                     sysfs_create_dir_ns()
	 *                                                       kernfs_create_dir_ns(sd)
	 *
	 *       sysfs_remove_dir() // glue_dir->sd=NULL
	 *       sysfs_put()        // free glue_dir->sd
	 *
	 *                                                         // sd is freed
	 *                                                         kernfs_new_node(sd)
	 *                                                           kernfs_get(glue_dir)
	 *                                                           kernfs_add_one()
	 *                                                           kernfs_put()
	 *
	 * Before CPU1 remove last child device under glue dir, if CPU2 add
	 * a new device under glue dir, the glue_dir kobject reference count
	 * will be increase to 2 in kobject_get(k). And CPU2 has been called
	 * kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir()
	 * and sysfs_put(). This result in glue_dir->sd is freed.
	 *
	 * Then the CPU2 will see a stale "empty" but still potentially used
	 * glue dir around in kernfs_new_node().
	 *
	 * In order to avoid this happening, we also should make sure that
	 * kernfs_node for glue_dir is released in CPU1 only when refcount
	 * for glue_dir kobj is 1.
	 */
	ref = kref_read(&glue_dir->kref);
	/* delete the sysfs dir only when empty and ours is the last ref */
	if (!kobject_has_children(glue_dir) && !--ref)
		kobject_del(glue_dir);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}
2959 
/*
 * Create the standard sysfs symlinks for a newly added device:
 * "of_node" (when backed by a devicetree node), "subsystem" and
 * "device" inside the device directory, plus the per-class link
 * pointing back at the device.  Undone by
 * device_remove_class_symlinks().
 */
static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n",error);
		/* An error here doesn't warrant bringing down the device */
	}

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* /sys/block has directories and does not need symlinks */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

	/* unwind in reverse order of creation; removing a link that was
	 * never created (e.g. "of_node" without a DT node) is harmless */
out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}
3011 
/* Tear down the symlinks created by device_add_class_symlinks(). */
static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	/* /sys/block entries were never given a class-directory link */
	if (sysfs_deprecated && dev->class == &block_class)
		return;
#endif
	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}
3029 
3030 /**
3031  * dev_set_name - set a device name
3032  * @dev: device
3033  * @fmt: format string for the device's name
3034  */
dev_set_name(struct device * dev,const char * fmt,...)3035 int dev_set_name(struct device *dev, const char *fmt, ...)
3036 {
3037 	va_list vargs;
3038 	int err;
3039 
3040 	va_start(vargs, fmt);
3041 	err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
3042 	va_end(vargs);
3043 	return err;
3044 }
3045 EXPORT_SYMBOL_GPL(dev_set_name);
3046 
3047 /**
3048  * device_to_dev_kobj - select a /sys/dev/ directory for the device
3049  * @dev: device
3050  *
3051  * By default we select char/ for new entries.  Setting class->dev_obj
3052  * to NULL prevents an entry from being created.  class->dev_kobj must
3053  * be set (or cleared) before any devices are registered to the class
3054  * otherwise device_create_sys_dev_entry() and
3055  * device_remove_sys_dev_entry() will disagree about the presence of
3056  * the link.
3057  */
device_to_dev_kobj(struct device * dev)3058 static struct kobject *device_to_dev_kobj(struct device *dev)
3059 {
3060 	struct kobject *kobj;
3061 
3062 	if (dev->class)
3063 		kobj = dev->class->dev_kobj;
3064 	else
3065 		kobj = sysfs_dev_char_kobj;
3066 
3067 	return kobj;
3068 }
3069 
device_create_sys_dev_entry(struct device * dev)3070 static int device_create_sys_dev_entry(struct device *dev)
3071 {
3072 	struct kobject *kobj = device_to_dev_kobj(dev);
3073 	int error = 0;
3074 	char devt_str[15];
3075 
3076 	if (kobj) {
3077 		format_dev_t(devt_str, dev->devt);
3078 		error = sysfs_create_link(kobj, &dev->kobj, devt_str);
3079 	}
3080 
3081 	return error;
3082 }
3083 
device_remove_sys_dev_entry(struct device * dev)3084 static void device_remove_sys_dev_entry(struct device *dev)
3085 {
3086 	struct kobject *kobj = device_to_dev_kobj(dev);
3087 	char devt_str[15];
3088 
3089 	if (kobj) {
3090 		format_dev_t(devt_str, dev->devt);
3091 		sysfs_remove_link(kobj, devt_str);
3092 	}
3093 }
3094 
device_private_init(struct device * dev)3095 static int device_private_init(struct device *dev)
3096 {
3097 	dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
3098 	if (!dev->p)
3099 		return -ENOMEM;
3100 	dev->p->device = dev;
3101 	klist_init(&dev->p->klist_children, klist_children_get,
3102 		   klist_children_put);
3103 	INIT_LIST_HEAD(&dev->p->deferred_probe);
3104 	return 0;
3105 }
3106 
/**
 * device_add - add device to device hierarchy.
 * @dev: device.
 *
 * This is part 2 of device_register(), though may be called
 * separately _iff_ device_initialize() has been called separately.
 *
 * This adds @dev to the kobject hierarchy via kobject_add(), adds it
 * to the global and sibling lists for the device, then
 * adds it to the other relevant subsystems of the driver model.
 *
 * Do not call this routine or device_register() more than once for
 * any device structure.  The driver model core is not designed to work
 * with devices that get unregistered and then spring back to life.
 * (Among other things, it's very hard to guarantee that all references
 * to the previous incarnation of @dev have been dropped.)  Allocate
 * and register a fresh new struct device instead.
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up your
 * reference instead.
 *
 * Rule of thumb is: if device_add() succeeds, you should call
 * device_del() when you want to get rid of it. If device_add() has
 * *not* succeeded, use *only* put_device() to drop the reference
 * count.
 */
int device_add(struct device *dev)
{
	struct device *parent;
	struct kobject *kobj;
	struct class_interface *class_intf;
	int error = -EINVAL;
	struct kobject *glue_dir = NULL;

	/* hold a reference for the duration of registration */
	dev = get_device(dev);
	if (!dev)
		goto done;

	if (!dev->p) {
		error = device_private_init(dev);
		if (error)
			goto done;
	}

	/*
	 * for statically allocated devices, which should all be converted
	 * some day, we need to initialize the name. We prevent reading back
	 * the name, and force the use of dev_name()
	 */
	if (dev->init_name) {
		dev_set_name(dev, "%s", dev->init_name);
		dev->init_name = NULL;
	}

	/* subsystems can specify simple device enumeration */
	if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
		dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);

	if (!dev_name(dev)) {
		error = -EINVAL;
		goto name_error;
	}

	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

	parent = get_device(dev->parent);
	kobj = get_device_parent(dev, parent);
	if (IS_ERR(kobj)) {
		error = PTR_ERR(kobj);
		goto parent_error;
	}
	if (kobj)
		dev->kobj.parent = kobj;

	/* use parent numa_node */
	if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
		set_dev_node(dev, dev_to_node(parent));

	/* first, register with generic layer. */
	/* we require the name to be set before, and pass NULL */
	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
	if (error) {
		glue_dir = get_glue_dir(dev);
		goto Error;
	}

	/* notify platform of device entry */
	error = device_platform_notify(dev, KOBJ_ADD);
	if (error)
		goto platform_error;

	error = device_create_file(dev, &dev_attr_uevent);
	if (error)
		goto attrError;

	error = device_add_class_symlinks(dev);
	if (error)
		goto SymlinkError;
	error = device_add_attrs(dev);
	if (error)
		goto AttrsError;
	error = bus_add_device(dev);
	if (error)
		goto BusError;
	error = dpm_sysfs_add(dev);
	if (error)
		goto DPMError;
	device_pm_add(dev);

	/* devices with a dev_t get "dev" attribute, /sys/dev link and
	 * a devtmpfs node */
	if (MAJOR(dev->devt)) {
		error = device_create_file(dev, &dev_attr_dev);
		if (error)
			goto DevAttrError;

		error = device_create_sys_dev_entry(dev);
		if (error)
			goto SysEntryError;

		devtmpfs_create_node(dev);
	}

	/* Notify clients of device addition.  This call must come
	 * after dpm_sysfs_add() and before kobject_uevent().
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_ADD_DEVICE, dev);

	kobject_uevent(&dev->kobj, KOBJ_ADD);

	/*
	 * Check if any of the other devices (consumers) have been waiting for
	 * this device (supplier) to be added so that they can create a device
	 * link to it.
	 *
	 * This needs to happen after device_pm_add() because device_link_add()
	 * requires the supplier be registered before it's called.
	 *
	 * But this also needs to happen before bus_probe_device() to make sure
	 * waiting consumers can link to it before the driver is bound to the
	 * device and the driver sync_state callback is called for this device.
	 */
	if (dev->fwnode && !dev->fwnode->dev) {
		dev->fwnode->dev = dev;
		fw_devlink_link_device(dev);
	}

	bus_probe_device(dev);
	if (parent)
		klist_add_tail(&dev->p->knode_parent,
			       &parent->p->klist_children);

	if (dev->class) {
		mutex_lock(&dev->class->p->mutex);
		/* tie the class to the device */
		klist_add_tail(&dev->p->knode_class,
			       &dev->class->p->klist_devices);

		/* notify any interfaces that the device is here */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->add_dev)
				class_intf->add_dev(dev, class_intf);
		mutex_unlock(&dev->class->p->mutex);
	}
done:
	put_device(dev);
	return error;
	/* error unwind: each label undoes the steps that succeeded, in
	 * reverse order of the setup sequence above */
 SysEntryError:
	if (MAJOR(dev->devt))
		device_remove_file(dev, &dev_attr_dev);
 DevAttrError:
	device_pm_remove(dev);
	dpm_sysfs_remove(dev);
 DPMError:
	bus_remove_device(dev);
 BusError:
	device_remove_attrs(dev);
 AttrsError:
	device_remove_class_symlinks(dev);
 SymlinkError:
	device_remove_file(dev, &dev_attr_uevent);
 attrError:
	device_platform_notify(dev, KOBJ_REMOVE);
platform_error:
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
 Error:
	cleanup_glue_dir(dev, glue_dir);
parent_error:
	put_device(parent);
name_error:
	kfree(dev->p);
	dev->p = NULL;
	goto done;
}
EXPORT_SYMBOL_GPL(device_add);
3306 
/**
 * device_register - register a device with the system.
 * @dev: pointer to the device structure
 *
 * This happens in two clean steps - initialize the device
 * and add it to the system. The two steps can be called
 * separately, but this is the easiest and most common.
 * I.e. you should only call the two helpers separately if
 * have a clearly defined need to use and refcount the device
 * before it is added to the hierarchy.
 *
 * For more information, see the kerneldoc for device_initialize()
 * and device_add().
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up the
 * reference initialized in this function instead.
 *
 * Return: 0 on success, or the error code from device_add().
 */
int device_register(struct device *dev)
{
	device_initialize(dev);
	return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);
3331 
3332 /**
3333  * get_device - increment reference count for device.
3334  * @dev: device.
3335  *
3336  * This simply forwards the call to kobject_get(), though
3337  * we do take care to provide for the case that we get a NULL
3338  * pointer passed in.
3339  */
get_device(struct device * dev)3340 struct device *get_device(struct device *dev)
3341 {
3342 	return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
3343 }
3344 EXPORT_SYMBOL_GPL(get_device);
3345 
3346 /**
3347  * put_device - decrement reference count.
3348  * @dev: device in question.
3349  */
put_device(struct device * dev)3350 void put_device(struct device *dev)
3351 {
3352 	/* might_sleep(); */
3353 	if (dev)
3354 		kobject_put(&dev->kobj);
3355 }
3356 EXPORT_SYMBOL_GPL(put_device);
3357 
kill_device(struct device * dev)3358 bool kill_device(struct device *dev)
3359 {
3360 	/*
3361 	 * Require the device lock and set the "dead" flag to guarantee that
3362 	 * the update behavior is consistent with the other bitfields near
3363 	 * it and that we cannot have an asynchronous probe routine trying
3364 	 * to run while we are tearing out the bus/class/sysfs from
3365 	 * underneath the device.
3366 	 */
3367 	lockdep_assert_held(&dev->mutex);
3368 
3369 	if (dev->p->dead)
3370 		return false;
3371 	dev->p->dead = true;
3372 	return true;
3373 }
3374 EXPORT_SYMBOL_GPL(kill_device);
3375 
/**
 * device_del - delete device from system.
 * @dev: device.
 *
 * This is the first part of the device unregistration
 * sequence. This removes the device from the lists we control
 * from here, has it removed from the other driver model
 * subsystems it was added to in device_add(), and removes it
 * from the kobject hierarchy.
 *
 * NOTE: this should be called manually _iff_ device_add() was
 * also called manually.
 */
void device_del(struct device *dev)
{
	struct device *parent = dev->parent;
	struct kobject *glue_dir = NULL;
	struct class_interface *class_intf;
	unsigned int noio_flag;

	/* mark the device dead under its lock so an async probe cannot
	 * race with the teardown below (see kill_device()) */
	device_lock(dev);
	kill_device(dev);
	device_unlock(dev);

	if (dev->fwnode && dev->fwnode->dev == dev)
		dev->fwnode->dev = NULL;

	/* Notify clients of device removal.  This call must come
	 * before dpm_sysfs_remove().
	 */
	noio_flag = memalloc_noio_save();
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DEL_DEVICE, dev);

	dpm_sysfs_remove(dev);
	if (parent)
		klist_del(&dev->p->knode_parent);
	if (MAJOR(dev->devt)) {
		devtmpfs_delete_node(dev);
		device_remove_sys_dev_entry(dev);
		device_remove_file(dev, &dev_attr_dev);
	}
	if (dev->class) {
		device_remove_class_symlinks(dev);

		mutex_lock(&dev->class->p->mutex);
		/* notify any interfaces that the device is now gone */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->remove_dev)
				class_intf->remove_dev(dev, class_intf);
		/* remove the device from the class list */
		klist_del(&dev->p->knode_class);
		mutex_unlock(&dev->class->p->mutex);
	}
	device_remove_file(dev, &dev_attr_uevent);
	device_remove_attrs(dev);
	bus_remove_device(dev);
	device_pm_remove(dev);
	driver_deferred_probe_del(dev);
	device_platform_notify(dev, KOBJ_REMOVE);
	device_remove_properties(dev);
	device_links_purge(dev);

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_REMOVED_DEVICE, dev);
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
	/* drop the (possibly last) reference on any glue directory */
	cleanup_glue_dir(dev, glue_dir);
	memalloc_noio_restore(noio_flag);
	put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);
3452 
/**
 * device_unregister - unregister device from system.
 * @dev: device going away.
 *
 * We do this in two parts, like we do device_register(). First,
 * we remove it from all the subsystems with device_del(), then
 * we decrement the reference count via put_device(). If that
 * is the final reference count, the device will be cleaned up
 * via device_release() above. Otherwise, the structure will
 * stick around until the final reference to the device is dropped.
 */
void device_unregister(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	device_del(dev);
	/* may free @dev if this drops the final reference */
	put_device(dev);
}
EXPORT_SYMBOL_GPL(device_unregister);
3471 
prev_device(struct klist_iter * i)3472 static struct device *prev_device(struct klist_iter *i)
3473 {
3474 	struct klist_node *n = klist_prev(i);
3475 	struct device *dev = NULL;
3476 	struct device_private *p;
3477 
3478 	if (n) {
3479 		p = to_device_private_parent(n);
3480 		dev = p->device;
3481 	}
3482 	return dev;
3483 }
3484 
next_device(struct klist_iter * i)3485 static struct device *next_device(struct klist_iter *i)
3486 {
3487 	struct klist_node *n = klist_next(i);
3488 	struct device *dev = NULL;
3489 	struct device_private *p;
3490 
3491 	if (n) {
3492 		p = to_device_private_parent(n);
3493 		dev = p->device;
3494 	}
3495 	return dev;
3496 }
3497 
3498 /**
3499  * device_get_devnode - path of device node file
3500  * @dev: device
3501  * @mode: returned file access mode
3502  * @uid: returned file owner
3503  * @gid: returned file group
3504  * @tmp: possibly allocated string
3505  *
3506  * Return the relative path of a possible device node.
3507  * Non-default names may need to allocate a memory to compose
3508  * a name. This memory is returned in tmp and needs to be
3509  * freed by the caller.
3510  */
device_get_devnode(struct device * dev,umode_t * mode,kuid_t * uid,kgid_t * gid,const char ** tmp)3511 const char *device_get_devnode(struct device *dev,
3512 			       umode_t *mode, kuid_t *uid, kgid_t *gid,
3513 			       const char **tmp)
3514 {
3515 	char *s;
3516 
3517 	*tmp = NULL;
3518 
3519 	/* the device type may provide a specific name */
3520 	if (dev->type && dev->type->devnode)
3521 		*tmp = dev->type->devnode(dev, mode, uid, gid);
3522 	if (*tmp)
3523 		return *tmp;
3524 
3525 	/* the class may provide a specific name */
3526 	if (dev->class && dev->class->devnode)
3527 		*tmp = dev->class->devnode(dev, mode);
3528 	if (*tmp)
3529 		return *tmp;
3530 
3531 	/* return name without allocation, tmp == NULL */
3532 	if (strchr(dev_name(dev), '!') == NULL)
3533 		return dev_name(dev);
3534 
3535 	/* replace '!' in the name with '/' */
3536 	s = kstrdup(dev_name(dev), GFP_KERNEL);
3537 	if (!s)
3538 		return NULL;
3539 	strreplace(s, '!', '/');
3540 	return *tmp = s;
3541 }
3542 
3543 /**
3544  * device_for_each_child - device child iterator.
3545  * @parent: parent struct device.
3546  * @fn: function to be called for each device.
3547  * @data: data for the callback.
3548  *
3549  * Iterate over @parent's child devices, and call @fn for each,
3550  * passing it @data.
3551  *
3552  * We check the return of @fn each time. If it returns anything
3553  * other than 0, we break out and return that value.
3554  */
device_for_each_child(struct device * parent,void * data,int (* fn)(struct device * dev,void * data))3555 int device_for_each_child(struct device *parent, void *data,
3556 			  int (*fn)(struct device *dev, void *data))
3557 {
3558 	struct klist_iter i;
3559 	struct device *child;
3560 	int error = 0;
3561 
3562 	if (!parent->p)
3563 		return 0;
3564 
3565 	klist_iter_init(&parent->p->klist_children, &i);
3566 	while (!error && (child = next_device(&i)))
3567 		error = fn(child, data);
3568 	klist_iter_exit(&i);
3569 	return error;
3570 }
3571 EXPORT_SYMBOL_GPL(device_for_each_child);
3572 
3573 /**
3574  * device_for_each_child_reverse - device child iterator in reversed order.
3575  * @parent: parent struct device.
3576  * @fn: function to be called for each device.
3577  * @data: data for the callback.
3578  *
3579  * Iterate over @parent's child devices, and call @fn for each,
3580  * passing it @data.
3581  *
3582  * We check the return of @fn each time. If it returns anything
3583  * other than 0, we break out and return that value.
3584  */
device_for_each_child_reverse(struct device * parent,void * data,int (* fn)(struct device * dev,void * data))3585 int device_for_each_child_reverse(struct device *parent, void *data,
3586 				  int (*fn)(struct device *dev, void *data))
3587 {
3588 	struct klist_iter i;
3589 	struct device *child;
3590 	int error = 0;
3591 
3592 	if (!parent->p)
3593 		return 0;
3594 
3595 	klist_iter_init(&parent->p->klist_children, &i);
3596 	while ((child = prev_device(&i)) && !error)
3597 		error = fn(child, data);
3598 	klist_iter_exit(&i);
3599 	return error;
3600 }
3601 EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
3602 
3603 /**
3604  * device_find_child - device iterator for locating a particular device.
3605  * @parent: parent struct device
3606  * @match: Callback function to check device
3607  * @data: Data to pass to match function
3608  *
3609  * This is similar to the device_for_each_child() function above, but it
3610  * returns a reference to a device that is 'found' for later use, as
3611  * determined by the @match callback.
3612  *
3613  * The callback should return 0 if the device doesn't match and non-zero
3614  * if it does.  If the callback returns non-zero and a reference to the
3615  * current device can be obtained, this function will return to the caller
3616  * and not iterate over any more devices.
3617  *
3618  * NOTE: you will need to drop the reference with put_device() after use.
3619  */
device_find_child(struct device * parent,void * data,int (* match)(struct device * dev,void * data))3620 struct device *device_find_child(struct device *parent, void *data,
3621 				 int (*match)(struct device *dev, void *data))
3622 {
3623 	struct klist_iter i;
3624 	struct device *child;
3625 
3626 	if (!parent)
3627 		return NULL;
3628 
3629 	klist_iter_init(&parent->p->klist_children, &i);
3630 	while ((child = next_device(&i)))
3631 		if (match(child, data) && get_device(child))
3632 			break;
3633 	klist_iter_exit(&i);
3634 	return child;
3635 }
3636 EXPORT_SYMBOL_GPL(device_find_child);
3637 
3638 /**
3639  * device_find_child_by_name - device iterator for locating a child device.
3640  * @parent: parent struct device
3641  * @name: name of the child device
3642  *
3643  * This is similar to the device_find_child() function above, but it
3644  * returns a reference to a device that has the name @name.
3645  *
3646  * NOTE: you will need to drop the reference with put_device() after use.
3647  */
device_find_child_by_name(struct device * parent,const char * name)3648 struct device *device_find_child_by_name(struct device *parent,
3649 					 const char *name)
3650 {
3651 	struct klist_iter i;
3652 	struct device *child;
3653 
3654 	if (!parent)
3655 		return NULL;
3656 
3657 	klist_iter_init(&parent->p->klist_children, &i);
3658 	while ((child = next_device(&i)))
3659 		if (sysfs_streq(dev_name(child), name) && get_device(child))
3660 			break;
3661 	klist_iter_exit(&i);
3662 	return child;
3663 }
3664 EXPORT_SYMBOL_GPL(device_find_child_by_name);
3665 
/*
 * Boot-time setup of the core sysfs hierarchy: the "devices" kset plus
 * the /sys/dev tree with its "block" and "char" directories.  On any
 * failure everything created so far is torn down and -ENOMEM returned.
 */
int __init devices_init(void)
{
	devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
	if (!devices_kset)
		return -ENOMEM;
	dev_kobj = kobject_create_and_add("dev", NULL);
	if (!dev_kobj)
		goto dev_kobj_err;
	sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
	if (!sysfs_dev_block_kobj)
		goto block_kobj_err;
	sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
	if (!sysfs_dev_char_kobj)
		goto char_kobj_err;

	return 0;

	/* unwind in reverse order of creation */
 char_kobj_err:
	kobject_put(sysfs_dev_block_kobj);
 block_kobj_err:
	kobject_put(dev_kobj);
 dev_kobj_err:
	kset_unregister(devices_kset);
	return -ENOMEM;
}
3691 
device_check_offline(struct device * dev,void * not_used)3692 static int device_check_offline(struct device *dev, void *not_used)
3693 {
3694 	int ret;
3695 
3696 	ret = device_for_each_child(dev, NULL, device_check_offline);
3697 	if (ret)
3698 		return ret;
3699 
3700 	return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
3701 }
3702 
3703 /**
3704  * device_offline - Prepare the device for hot-removal.
3705  * @dev: Device to be put offline.
3706  *
3707  * Execute the device bus type's .offline() callback, if present, to prepare
3708  * the device for a subsequent hot-removal.  If that succeeds, the device must
3709  * not be used until either it is removed or its bus type's .online() callback
3710  * is executed.
3711  *
3712  * Call under device_hotplug_lock.
3713  */
device_offline(struct device * dev)3714 int device_offline(struct device *dev)
3715 {
3716 	int ret;
3717 
3718 	if (dev->offline_disabled)
3719 		return -EPERM;
3720 
3721 	ret = device_for_each_child(dev, NULL, device_check_offline);
3722 	if (ret)
3723 		return ret;
3724 
3725 	device_lock(dev);
3726 	if (device_supports_offline(dev)) {
3727 		if (dev->offline) {
3728 			ret = 1;
3729 		} else {
3730 			ret = dev->bus->offline(dev);
3731 			if (!ret) {
3732 				kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
3733 				dev->offline = true;
3734 			}
3735 		}
3736 	}
3737 	device_unlock(dev);
3738 
3739 	return ret;
3740 }
3741 
3742 /**
3743  * device_online - Put the device back online after successful device_offline().
3744  * @dev: Device to be put back online.
3745  *
3746  * If device_offline() has been successfully executed for @dev, but the device
3747  * has not been removed subsequently, execute its bus type's .online() callback
3748  * to indicate that the device can be used again.
3749  *
3750  * Call under device_hotplug_lock.
3751  */
device_online(struct device * dev)3752 int device_online(struct device *dev)
3753 {
3754 	int ret = 0;
3755 
3756 	device_lock(dev);
3757 	if (device_supports_offline(dev)) {
3758 		if (dev->offline) {
3759 			ret = dev->bus->online(dev);
3760 			if (!ret) {
3761 				kobject_uevent(&dev->kobj, KOBJ_ONLINE);
3762 				dev->offline = false;
3763 			}
3764 		} else {
3765 			ret = 1;
3766 		}
3767 	}
3768 	device_unlock(dev);
3769 
3770 	return ret;
3771 }
3772 
/* A dummy parent device used to group other devices under /sys/devices. */
struct root_device {
	struct device dev;
	struct module *owner;	/* module whose 'module' symlink we created */
};
3777 
/* Convert an embedded struct device back to its containing root_device. */
static inline struct root_device *to_root_device(struct device *d)
{
	return container_of(d, struct root_device, dev);
}
3782 
/* ->release callback: frees the root_device when its last reference drops. */
static void root_device_release(struct device *dev)
{
	kfree(to_root_device(dev));
}
3787 
3788 /**
3789  * __root_device_register - allocate and register a root device
3790  * @name: root device name
3791  * @owner: owner module of the root device, usually THIS_MODULE
3792  *
3793  * This function allocates a root device and registers it
3794  * using device_register(). In order to free the returned
3795  * device, use root_device_unregister().
3796  *
3797  * Root devices are dummy devices which allow other devices
3798  * to be grouped under /sys/devices. Use this function to
3799  * allocate a root device and then use it as the parent of
3800  * any device which should appear under /sys/devices/{name}
3801  *
3802  * The /sys/devices/{name} directory will also contain a
3803  * 'module' symlink which points to the @owner directory
3804  * in sysfs.
3805  *
3806  * Returns &struct device pointer on success, or ERR_PTR() on error.
3807  *
3808  * Note: You probably want to use root_device_register().
3809  */
__root_device_register(const char * name,struct module * owner)3810 struct device *__root_device_register(const char *name, struct module *owner)
3811 {
3812 	struct root_device *root;
3813 	int err = -ENOMEM;
3814 
3815 	root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
3816 	if (!root)
3817 		return ERR_PTR(err);
3818 
3819 	err = dev_set_name(&root->dev, "%s", name);
3820 	if (err) {
3821 		kfree(root);
3822 		return ERR_PTR(err);
3823 	}
3824 
3825 	root->dev.release = root_device_release;
3826 
3827 	err = device_register(&root->dev);
3828 	if (err) {
3829 		put_device(&root->dev);
3830 		return ERR_PTR(err);
3831 	}
3832 
3833 #ifdef CONFIG_MODULES	/* gotta find a "cleaner" way to do this */
3834 	if (owner) {
3835 		struct module_kobject *mk = &owner->mkobj;
3836 
3837 		err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
3838 		if (err) {
3839 			device_unregister(&root->dev);
3840 			return ERR_PTR(err);
3841 		}
3842 		root->owner = owner;
3843 	}
3844 #endif
3845 
3846 	return &root->dev;
3847 }
3848 EXPORT_SYMBOL_GPL(__root_device_register);
3849 
3850 /**
3851  * root_device_unregister - unregister and free a root device
3852  * @dev: device going away
3853  *
3854  * This function unregisters and cleans up a device that was created by
3855  * root_device_register().
3856  */
root_device_unregister(struct device * dev)3857 void root_device_unregister(struct device *dev)
3858 {
3859 	struct root_device *root = to_root_device(dev);
3860 
3861 	if (root->owner)
3862 		sysfs_remove_link(&root->dev.kobj, "module");
3863 
3864 	device_unregister(dev);
3865 }
3866 EXPORT_SYMBOL_GPL(root_device_unregister);
3867 
3868 
/* ->release callback for devices allocated by device_create_groups_vargs(). */
static void device_create_release(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	kfree(dev);
}
3874 
3875 static __printf(6, 0) struct device *
device_create_groups_vargs(struct class * class,struct device * parent,dev_t devt,void * drvdata,const struct attribute_group ** groups,const char * fmt,va_list args)3876 device_create_groups_vargs(struct class *class, struct device *parent,
3877 			   dev_t devt, void *drvdata,
3878 			   const struct attribute_group **groups,
3879 			   const char *fmt, va_list args)
3880 {
3881 	struct device *dev = NULL;
3882 	int retval = -ENODEV;
3883 
3884 	if (class == NULL || IS_ERR(class))
3885 		goto error;
3886 
3887 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3888 	if (!dev) {
3889 		retval = -ENOMEM;
3890 		goto error;
3891 	}
3892 
3893 	device_initialize(dev);
3894 	dev->devt = devt;
3895 	dev->class = class;
3896 	dev->parent = parent;
3897 	dev->groups = groups;
3898 	dev->release = device_create_release;
3899 	dev_set_drvdata(dev, drvdata);
3900 
3901 	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
3902 	if (retval)
3903 		goto error;
3904 
3905 	retval = device_add(dev);
3906 	if (retval)
3907 		goto error;
3908 
3909 	return dev;
3910 
3911 error:
3912 	put_device(dev);
3913 	return ERR_PTR(retval);
3914 }
3915 
3916 /**
3917  * device_create - creates a device and registers it with sysfs
3918  * @class: pointer to the struct class that this device should be registered to
3919  * @parent: pointer to the parent struct device of this new device, if any
3920  * @devt: the dev_t for the char device to be added
3921  * @drvdata: the data to be added to the device for callbacks
3922  * @fmt: string for the device's name
3923  *
3924  * This function can be used by char device classes.  A struct device
3925  * will be created in sysfs, registered to the specified class.
3926  *
3927  * A "dev" file will be created, showing the dev_t for the device, if
3928  * the dev_t is not 0,0.
3929  * If a pointer to a parent struct device is passed in, the newly created
3930  * struct device will be a child of that device in sysfs.
3931  * The pointer to the struct device will be returned from the call.
3932  * Any further sysfs files that might be required can be created using this
3933  * pointer.
3934  *
3935  * Returns &struct device pointer on success, or ERR_PTR() on error.
3936  *
3937  * Note: the struct class passed to this function must have previously
3938  * been created with a call to class_create().
3939  */
device_create(struct class * class,struct device * parent,dev_t devt,void * drvdata,const char * fmt,...)3940 struct device *device_create(struct class *class, struct device *parent,
3941 			     dev_t devt, void *drvdata, const char *fmt, ...)
3942 {
3943 	va_list vargs;
3944 	struct device *dev;
3945 
3946 	va_start(vargs, fmt);
3947 	dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
3948 					  fmt, vargs);
3949 	va_end(vargs);
3950 	return dev;
3951 }
3952 EXPORT_SYMBOL_GPL(device_create);
3953 
3954 /**
3955  * device_create_with_groups - creates a device and registers it with sysfs
3956  * @class: pointer to the struct class that this device should be registered to
3957  * @parent: pointer to the parent struct device of this new device, if any
3958  * @devt: the dev_t for the char device to be added
3959  * @drvdata: the data to be added to the device for callbacks
3960  * @groups: NULL-terminated list of attribute groups to be created
3961  * @fmt: string for the device's name
3962  *
3963  * This function can be used by char device classes.  A struct device
3964  * will be created in sysfs, registered to the specified class.
3965  * Additional attributes specified in the groups parameter will also
3966  * be created automatically.
3967  *
3968  * A "dev" file will be created, showing the dev_t for the device, if
3969  * the dev_t is not 0,0.
3970  * If a pointer to a parent struct device is passed in, the newly created
3971  * struct device will be a child of that device in sysfs.
3972  * The pointer to the struct device will be returned from the call.
3973  * Any further sysfs files that might be required can be created using this
3974  * pointer.
3975  *
3976  * Returns &struct device pointer on success, or ERR_PTR() on error.
3977  *
3978  * Note: the struct class passed to this function must have previously
3979  * been created with a call to class_create().
3980  */
device_create_with_groups(struct class * class,struct device * parent,dev_t devt,void * drvdata,const struct attribute_group ** groups,const char * fmt,...)3981 struct device *device_create_with_groups(struct class *class,
3982 					 struct device *parent, dev_t devt,
3983 					 void *drvdata,
3984 					 const struct attribute_group **groups,
3985 					 const char *fmt, ...)
3986 {
3987 	va_list vargs;
3988 	struct device *dev;
3989 
3990 	va_start(vargs, fmt);
3991 	dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
3992 					 fmt, vargs);
3993 	va_end(vargs);
3994 	return dev;
3995 }
3996 EXPORT_SYMBOL_GPL(device_create_with_groups);
3997 
3998 /**
3999  * device_destroy - removes a device that was created with device_create()
4000  * @class: pointer to the struct class that this device was registered with
4001  * @devt: the dev_t of the device that was previously registered
4002  *
4003  * This call unregisters and cleans up a device that was created with a
4004  * call to device_create().
4005  */
device_destroy(struct class * class,dev_t devt)4006 void device_destroy(struct class *class, dev_t devt)
4007 {
4008 	struct device *dev;
4009 
4010 	dev = class_find_device_by_devt(class, devt);
4011 	if (dev) {
4012 		put_device(dev);
4013 		device_unregister(dev);
4014 	}
4015 }
4016 EXPORT_SYMBOL_GPL(device_destroy);
4017 
4018 /**
4019  * device_rename - renames a device
4020  * @dev: the pointer to the struct device to be renamed
4021  * @new_name: the new name of the device
4022  *
4023  * It is the responsibility of the caller to provide mutual
4024  * exclusion between two different calls of device_rename
4025  * on the same device to ensure that new_name is valid and
4026  * won't conflict with other devices.
4027  *
4028  * Note: Don't call this function.  Currently, the networking layer calls this
4029  * function, but that will change.  The following text from Kay Sievers offers
4030  * some insight:
4031  *
4032  * Renaming devices is racy at many levels, symlinks and other stuff are not
4033  * replaced atomically, and you get a "move" uevent, but it's not easy to
4034  * connect the event to the old and new device. Device nodes are not renamed at
4035  * all, there isn't even support for that in the kernel now.
4036  *
4037  * In the meantime, during renaming, your target name might be taken by another
4038  * driver, creating conflicts. Or the old name is taken directly after you
4039  * renamed it -- then you get events for the same DEVPATH, before you even see
4040  * the "move" event. It's just a mess, and nothing new should ever rely on
4041  * kernel device renaming. Besides that, it's not even implemented now for
4042  * other things than (driver-core wise very simple) network devices.
4043  *
4044  * We are currently about to change network renaming in udev to completely
4045  * disallow renaming of devices in the same namespace as the kernel uses,
4046  * because we can't solve the problems properly, that arise with swapping names
4047  * of multiple interfaces without races. Means, renaming of eth[0-9]* will only
4048  * be allowed to some other name than eth[0-9]*, for the aforementioned
4049  * reasons.
4050  *
4051  * Make up a "real" name in the driver before you register anything, or add
4052  * some other attributes for userspace to find the device, or use udev to add
4053  * symlinks -- but never rename kernel devices later, it's a complete mess. We
4054  * don't even want to get into that and try to implement the missing pieces in
4055  * the core. We really have other pieces to fix in the driver core mess. :)
4056  */
device_rename(struct device * dev,const char * new_name)4057 int device_rename(struct device *dev, const char *new_name)
4058 {
4059 	struct kobject *kobj = &dev->kobj;
4060 	char *old_device_name = NULL;
4061 	int error;
4062 
4063 	dev = get_device(dev);
4064 	if (!dev)
4065 		return -EINVAL;
4066 
4067 	dev_dbg(dev, "renaming to %s\n", new_name);
4068 
4069 	old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
4070 	if (!old_device_name) {
4071 		error = -ENOMEM;
4072 		goto out;
4073 	}
4074 
4075 	if (dev->class) {
4076 		error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
4077 					     kobj, old_device_name,
4078 					     new_name, kobject_namespace(kobj));
4079 		if (error)
4080 			goto out;
4081 	}
4082 
4083 	error = kobject_rename(kobj, new_name);
4084 	if (error)
4085 		goto out;
4086 
4087 out:
4088 	put_device(dev);
4089 
4090 	kfree(old_device_name);
4091 
4092 	return error;
4093 }
4094 EXPORT_SYMBOL_GPL(device_rename);
4095 
device_move_class_links(struct device * dev,struct device * old_parent,struct device * new_parent)4096 static int device_move_class_links(struct device *dev,
4097 				   struct device *old_parent,
4098 				   struct device *new_parent)
4099 {
4100 	int error = 0;
4101 
4102 	if (old_parent)
4103 		sysfs_remove_link(&dev->kobj, "device");
4104 	if (new_parent)
4105 		error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
4106 					  "device");
4107 	return error;
4108 }
4109 
/**
 * device_move - moves a device to a new parent
 * @dev: the pointer to the struct device to be moved
 * @new_parent: the new parent of the device (can be NULL)
 * @dpm_order: how to reorder the dpm_list
 *
 * Re-parents @dev in sysfs, the parent/child klists and optionally the PM
 * list; on a class-link failure it attempts to roll everything back to
 * @dev's previous parent.  Returns 0 on success or a negative error code.
 */
int device_move(struct device *dev, struct device *new_parent,
		enum dpm_order dpm_order)
{
	int error;
	struct device *old_parent;
	struct kobject *new_parent_kobj;

	/* Pin @dev so it cannot go away while we re-parent it. */
	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	device_pm_lock();
	new_parent = get_device(new_parent);
	new_parent_kobj = get_device_parent(dev, new_parent);
	if (IS_ERR(new_parent_kobj)) {
		error = PTR_ERR(new_parent_kobj);
		put_device(new_parent);
		goto out;
	}

	pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
		 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
	error = kobject_move(&dev->kobj, new_parent_kobj);
	if (error) {
		cleanup_glue_dir(dev, new_parent_kobj);
		put_device(new_parent);
		goto out;
	}
	/* sysfs move succeeded; now update the parent/child bookkeeping. */
	old_parent = dev->parent;
	dev->parent = new_parent;
	if (old_parent)
		klist_remove(&dev->p->knode_parent);
	if (new_parent) {
		klist_add_tail(&dev->p->knode_parent,
			       &new_parent->p->klist_children);
		set_dev_node(dev, dev_to_node(new_parent));
	}

	if (dev->class) {
		error = device_move_class_links(dev, old_parent, new_parent);
		if (error) {
			/* We ignore errors on cleanup since we're hosed anyway... */
			device_move_class_links(dev, new_parent, old_parent);
			/* Try to undo the kobject move and klist changes too. */
			if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
				if (new_parent)
					klist_remove(&dev->p->knode_parent);
				dev->parent = old_parent;
				if (old_parent) {
					klist_add_tail(&dev->p->knode_parent,
						       &old_parent->p->klist_children);
					set_dev_node(dev, dev_to_node(old_parent));
				}
			}
			cleanup_glue_dir(dev, new_parent_kobj);
			put_device(new_parent);
			goto out;
		}
	}
	/* Reorder the PM and devices lists as requested by the caller. */
	switch (dpm_order) {
	case DPM_ORDER_NONE:
		break;
	case DPM_ORDER_DEV_AFTER_PARENT:
		device_pm_move_after(dev, new_parent);
		devices_kset_move_after(dev, new_parent);
		break;
	case DPM_ORDER_PARENT_BEFORE_DEV:
		device_pm_move_before(new_parent, dev);
		devices_kset_move_before(new_parent, dev);
		break;
	case DPM_ORDER_DEV_LAST:
		device_pm_move_last(dev);
		devices_kset_move_last(dev);
		break;
	}

	put_device(old_parent);
out:
	device_pm_unlock();
	put_device(dev);
	return error;
}
EXPORT_SYMBOL_GPL(device_move);
4198 
/*
 * Re-own every sysfs attribute group attached to @dev: the groups coming
 * from its class, its device type and the device itself, plus the "online"
 * attribute when hot-removal is supported.  Stops at the first error.
 */
static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
				     kgid_t kgid)
{
	struct kobject *kobj = &dev->kobj;
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int rc;

	if (class) {
		/* Groups contributed by the device class. */
		rc = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
					       kgid);
		if (rc)
			return rc;
	}

	if (type) {
		/* Groups contributed by the device type. */
		rc = sysfs_groups_change_owner(kobj, type->groups, kuid, kgid);
		if (rc)
			return rc;
	}

	/* Groups attached directly to the device. */
	rc = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
	if (rc)
		return rc;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		/* The "online" attribute exists for such devices. */
		rc = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
					     kuid, kgid);
		if (rc)
			return rc;
	}

	return 0;
}
4244 
4245 /**
4246  * device_change_owner - change the owner of an existing device.
4247  * @dev: device.
4248  * @kuid: new owner's kuid
4249  * @kgid: new owner's kgid
4250  *
4251  * This changes the owner of @dev and its corresponding sysfs entries to
4252  * @kuid/@kgid. This function closely mirrors how @dev was added via driver
4253  * core.
4254  *
4255  * Returns 0 on success or error code on failure.
4256  */
device_change_owner(struct device * dev,kuid_t kuid,kgid_t kgid)4257 int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
4258 {
4259 	int error;
4260 	struct kobject *kobj = &dev->kobj;
4261 
4262 	dev = get_device(dev);
4263 	if (!dev)
4264 		return -EINVAL;
4265 
4266 	/*
4267 	 * Change the kobject and the default attributes and groups of the
4268 	 * ktype associated with it to @kuid/@kgid.
4269 	 */
4270 	error = sysfs_change_owner(kobj, kuid, kgid);
4271 	if (error)
4272 		goto out;
4273 
4274 	/*
4275 	 * Change the uevent file for @dev to the new owner. The uevent file
4276 	 * was created in a separate step when @dev got added and we mirror
4277 	 * that step here.
4278 	 */
4279 	error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
4280 					kgid);
4281 	if (error)
4282 		goto out;
4283 
4284 	/*
4285 	 * Change the device groups, the device groups associated with the
4286 	 * device class, and the groups associated with the device type of @dev
4287 	 * to @kuid/@kgid.
4288 	 */
4289 	error = device_attrs_change_owner(dev, kuid, kgid);
4290 	if (error)
4291 		goto out;
4292 
4293 	error = dpm_sysfs_change_owner(dev, kuid, kgid);
4294 	if (error)
4295 		goto out;
4296 
4297 #ifdef CONFIG_BLOCK
4298 	if (sysfs_deprecated && dev->class == &block_class)
4299 		goto out;
4300 #endif
4301 
4302 	/*
4303 	 * Change the owner of the symlink located in the class directory of
4304 	 * the device class associated with @dev which points to the actual
4305 	 * directory entry for @dev to @kuid/@kgid. This ensures that the
4306 	 * symlink shows the same permissions as its target.
4307 	 */
4308 	error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
4309 					dev_name(dev), kuid, kgid);
4310 	if (error)
4311 		goto out;
4312 
4313 out:
4314 	put_device(dev);
4315 	return error;
4316 }
4317 EXPORT_SYMBOL_GPL(device_change_owner);
4318 
/**
 * device_shutdown - call ->shutdown() on each device to shutdown.
 *
 * Walks the global devices list from the most recently added device
 * backwards, invoking the class shutdown_pre, bus shutdown or driver
 * shutdown callback for each one.  No further probing or runtime suspend
 * is allowed once this starts.
 */
void device_shutdown(void)
{
	struct device *dev, *parent;

	/* Let in-flight probes finish, then block any new ones. */
	wait_for_device_probe();
	device_block_probing();

	cpufreq_suspend();

	spin_lock(&devices_kset->list_lock);
	/*
	 * Walk the devices list backward, shutting down each in turn.
	 * Beware that device unplug events may also start pulling
	 * devices offline, even as the system is shutting down.
	 */
	while (!list_empty(&devices_kset->list)) {
		dev = list_entry(devices_kset->list.prev, struct device,
				kobj.entry);

		/*
		 * hold reference count of device's parent to
		 * prevent it from being freed because parent's
		 * lock is to be held
		 */
		parent = get_device(dev->parent);
		get_device(dev);
		/*
		 * Make sure the device is off the kset list, in the
		 * event that dev->*->shutdown() doesn't remove it.
		 */
		list_del_init(&dev->kobj.entry);
		/* Drop the list lock before taking device locks below. */
		spin_unlock(&devices_kset->list_lock);

		/* hold lock to avoid race with probe/release */
		if (parent)
			device_lock(parent);
		device_lock(dev);

		/* Don't allow any more runtime suspends */
		pm_runtime_get_noresume(dev);
		pm_runtime_barrier(dev);

		if (dev->class && dev->class->shutdown_pre) {
			if (initcall_debug)
				dev_info(dev, "shutdown_pre\n");
			dev->class->shutdown_pre(dev);
		}
		/* Bus shutdown takes precedence over the driver's. */
		if (dev->bus && dev->bus->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->bus->shutdown(dev);
		} else if (dev->driver && dev->driver->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->driver->shutdown(dev);
		}

		device_unlock(dev);
		if (parent)
			device_unlock(parent);

		put_device(dev);
		put_device(parent);

		/* Re-take the list lock for the next iteration. */
		spin_lock(&devices_kset->list_lock);
	}
	spin_unlock(&devices_kset->list_lock);
}
4390 
4391 /*
4392  * Device logging functions
4393  */
4394 
4395 #ifdef CONFIG_PRINTK
4396 static void
set_dev_info(const struct device * dev,struct dev_printk_info * dev_info)4397 set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
4398 {
4399 	const char *subsys;
4400 
4401 	memset(dev_info, 0, sizeof(*dev_info));
4402 
4403 	if (dev->class)
4404 		subsys = dev->class->name;
4405 	else if (dev->bus)
4406 		subsys = dev->bus->name;
4407 	else
4408 		return;
4409 
4410 	strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));
4411 
4412 	/*
4413 	 * Add device identifier DEVICE=:
4414 	 *   b12:8         block dev_t
4415 	 *   c127:3        char dev_t
4416 	 *   n8            netdev ifindex
4417 	 *   +sound:card0  subsystem:devname
4418 	 */
4419 	if (MAJOR(dev->devt)) {
4420 		char c;
4421 
4422 		if (strcmp(subsys, "block") == 0)
4423 			c = 'b';
4424 		else
4425 			c = 'c';
4426 
4427 		snprintf(dev_info->device, sizeof(dev_info->device),
4428 			 "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
4429 	} else if (strcmp(subsys, "net") == 0) {
4430 		struct net_device *net = to_net_dev(dev);
4431 
4432 		snprintf(dev_info->device, sizeof(dev_info->device),
4433 			 "n%u", net->ifindex);
4434 	} else {
4435 		snprintf(dev_info->device, sizeof(dev_info->device),
4436 			 "+%s:%s", subsys, dev_name(dev));
4437 	}
4438 }
4439 
/* Emit a printk record at @level with device metadata attached. */
int dev_vprintk_emit(int level, const struct device *dev,
		     const char *fmt, va_list args)
{
	struct dev_printk_info info;

	set_dev_info(dev, &info);

	return vprintk_emit(0, level, &info, fmt, args);
}
EXPORT_SYMBOL(dev_vprintk_emit);
4450 
dev_printk_emit(int level,const struct device * dev,const char * fmt,...)4451 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
4452 {
4453 	va_list args;
4454 	int r;
4455 
4456 	va_start(args, fmt);
4457 
4458 	r = dev_vprintk_emit(level, dev, fmt, args);
4459 
4460 	va_end(args);
4461 
4462 	return r;
4463 }
4464 EXPORT_SYMBOL(dev_printk_emit);
4465 
/* Common worker: print @vaf prefixed with driver and device name. */
static void __dev_printk(const char *level, const struct device *dev,
			struct va_format *vaf)
{
	if (!dev) {
		printk("%s(NULL device *): %pV", level, vaf);
		return;
	}

	/* level is a KERN_<n> string; level[1] is the numeric loglevel. */
	dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
			dev_driver_string(dev), dev_name(dev), vaf);
}
4475 
dev_printk(const char * level,const struct device * dev,const char * fmt,...)4476 void dev_printk(const char *level, const struct device *dev,
4477 		const char *fmt, ...)
4478 {
4479 	struct va_format vaf;
4480 	va_list args;
4481 
4482 	va_start(args, fmt);
4483 
4484 	vaf.fmt = fmt;
4485 	vaf.va = &args;
4486 
4487 	__dev_printk(level, dev, &vaf);
4488 
4489 	va_end(args);
4490 }
4491 EXPORT_SYMBOL(dev_printk);
4492 
/*
 * Generate the exported _dev_<level>() helpers: each formats its arguments
 * and hands them to __dev_printk() with a fixed kernel log level string.
 */
#define define_dev_printk_level(func, kern_level)		\
void func(const struct device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__dev_printk(kern_level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

/* One helper per log level, KERN_EMERG down to KERN_INFO. */
define_dev_printk_level(_dev_emerg, KERN_EMERG);
define_dev_printk_level(_dev_alert, KERN_ALERT);
define_dev_printk_level(_dev_crit, KERN_CRIT);
define_dev_printk_level(_dev_err, KERN_ERR);
define_dev_printk_level(_dev_warn, KERN_WARNING);
define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);
4517 
4518 #endif
4519 
4520 /**
4521  * dev_err_probe - probe error check and log helper
4522  * @dev: the pointer to the struct device
4523  * @err: error value to test
4524  * @fmt: printf-style format string
4525  * @...: arguments as specified in the format string
4526  *
4527  * This helper implements common pattern present in probe functions for error
4528  * checking: print debug or error message depending if the error value is
4529  * -EPROBE_DEFER and propagate error upwards.
4530  * In case of -EPROBE_DEFER it sets also defer probe reason, which can be
4531  * checked later by reading devices_deferred debugfs attribute.
4532  * It replaces code sequence::
4533  *
4534  * 	if (err != -EPROBE_DEFER)
4535  * 		dev_err(dev, ...);
4536  * 	else
4537  * 		dev_dbg(dev, ...);
4538  * 	return err;
4539  *
4540  * with::
4541  *
4542  * 	return dev_err_probe(dev, err, ...);
4543  *
4544  * Returns @err.
4545  *
4546  */
dev_err_probe(const struct device * dev,int err,const char * fmt,...)4547 int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
4548 {
4549 	struct va_format vaf;
4550 	va_list args;
4551 
4552 	va_start(args, fmt);
4553 	vaf.fmt = fmt;
4554 	vaf.va = &args;
4555 
4556 	if (err != -EPROBE_DEFER) {
4557 		dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
4558 	} else {
4559 		device_set_deferred_probe_reason(dev, &vaf);
4560 		dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
4561 	}
4562 
4563 	va_end(args);
4564 
4565 	return err;
4566 }
4567 EXPORT_SYMBOL_GPL(dev_err_probe);
4568 
fwnode_is_primary(struct fwnode_handle * fwnode)4569 static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
4570 {
4571 	return fwnode && !IS_ERR(fwnode->secondary);
4572 }
4573 
/**
 * set_primary_fwnode - Change the primary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New primary firmware node of the device.
 *
 * Set the device's firmware node pointer to @fwnode, but if a secondary
 * firmware node of the device is present, preserve it.
 */
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	struct device *parent = dev->parent;
	struct fwnode_handle *fn = dev->fwnode;

	if (fwnode) {
		/* If the current node is a primary, keep only its secondary. */
		if (fwnode_is_primary(fn))
			fn = fn->secondary;

		if (fn) {
			/* Carry the existing secondary over to the new primary. */
			WARN_ON(fwnode->secondary);
			fwnode->secondary = fn;
		}
		dev->fwnode = fwnode;
	} else {
		if (fwnode_is_primary(fn)) {
			/* Dropping the primary: fall back to its secondary. */
			dev->fwnode = fn->secondary;
			/*
			 * Leave fn->secondary intact when the primary appears
			 * to be shared with the parent device; otherwise
			 * detach it.
			 */
			if (!(parent && fn == parent->fwnode))
				fn->secondary = NULL;
		} else {
			dev->fwnode = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);
4607 
/**
 * set_secondary_fwnode - Change the secondary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New secondary firmware node of the device.
 *
 * If a primary firmware node of the device is present, set its secondary
 * pointer to @fwnode.  Otherwise, set the device's firmware node pointer to
 * @fwnode.
 */
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	/*
	 * Mark @fwnode as a secondary: with an error-valued ->secondary,
	 * fwnode_is_primary() is false for it.
	 */
	if (fwnode)
		fwnode->secondary = ERR_PTR(-ENODEV);

	if (fwnode_is_primary(dev->fwnode))
		dev->fwnode->secondary = fwnode;
	else
		dev->fwnode = fwnode;
}
EXPORT_SYMBOL_GPL(set_secondary_fwnode);
4628 
/**
 * device_set_of_node_from_dev - reuse device-tree node of another device
 * @dev: device whose device-tree node is being set
 * @dev2: device whose device-tree node is being reused
 *
 * Takes another reference to the new device-tree node after first dropping
 * any reference held to the old node.
 */
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
{
	of_node_put(dev->of_node);
	dev->of_node = of_node_get(dev2->of_node);
	/* NOTE(review): presumably marks the node as borrowed so teardown
	 * code treats it specially — confirm against of_node_reused users. */
	dev->of_node_reused = true;
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
4644 
/*
 * Set both firmware-node pointers of @dev: the generic fwnode and the OF
 * node derived from it (to_of_node() yields NULL for non-OF fwnodes).
 */
void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
{
	dev->fwnode = fwnode;
	dev->of_node = to_of_node(fwnode);
}
EXPORT_SYMBOL_GPL(device_set_node);
4651 
/* Match helper: true when @dev's name equals @name (sysfs-style compare). */
int device_match_name(struct device *dev, const void *name)
{
	return sysfs_streq(dev_name(dev), name);
}
EXPORT_SYMBOL_GPL(device_match_name);
4657 
/* Match helper: true when @dev's OF node is exactly @np. */
int device_match_of_node(struct device *dev, const void *np)
{
	return dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);
4663 
/* Match helper: true when @dev's firmware node is exactly @fwnode. */
int device_match_fwnode(struct device *dev, const void *fwnode)
{
	return dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);
4669 
/* Match helper: true when @dev's devt equals the dev_t pointed to by @pdevt. */
int device_match_devt(struct device *dev, const void *pdevt)
{
	return dev->devt == *(dev_t *)pdevt;
}
EXPORT_SYMBOL_GPL(device_match_devt);
4675 
/* Match helper: true when @dev's ACPI companion is exactly @adev. */
int device_match_acpi_dev(struct device *dev, const void *adev)
{
	return ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);
4681 
/* Match helper: matches every device unconditionally. */
int device_match_any(struct device *dev, const void *unused)
{
	return 1;
}
EXPORT_SYMBOL_GPL(device_match_any);
4687