1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * device.h - generic, centralized driver model
4  *
5  * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
6  * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
7  * Copyright (c) 2008-2009 Novell Inc.
8  *
9  * See Documentation/driver-api/driver-model/ for more information.
10  */
11 
12 #ifndef _DEVICE_H_
13 #define _DEVICE_H_
14 
15 #include <linux/dev_printk.h>
16 #include <linux/energy_model.h>
17 #include <linux/ioport.h>
18 #include <linux/kobject.h>
19 #include <linux/klist.h>
20 #include <linux/list.h>
21 #include <linux/lockdep.h>
22 #include <linux/compiler.h>
23 #include <linux/types.h>
24 #include <linux/mutex.h>
25 #include <linux/pm.h>
26 #include <linux/atomic.h>
27 #include <linux/uidgid.h>
28 #include <linux/gfp.h>
29 #include <linux/overflow.h>
30 #include <linux/device/bus.h>
31 #include <linux/device/class.h>
32 #include <linux/device/driver.h>
33 #include <asm/device.h>
34 
35 struct device;
36 struct device_private;
37 struct device_driver;
38 struct driver_private;
39 struct module;
40 struct class;
41 struct subsys_private;
42 struct device_node;
43 struct fwnode_handle;
44 struct iommu_ops;
45 struct iommu_group;
46 struct dev_pin_info;
47 struct dev_iommu;
48 
49 /**
50  * struct subsys_interface - interfaces to device functions
51  * @name:       name of the device function
 52  * @subsys:     subsystem of the devices to attach to
53  * @node:       the list of functions registered at the subsystem
54  * @add_dev:    device hookup to device function handler
55  * @remove_dev: device hookup to device function handler
56  *
57  * Simple interfaces attached to a subsystem. Multiple interfaces can
58  * attach to a subsystem and its devices. Unlike drivers, they do not
59  * exclusively claim or control devices. Interfaces usually represent
60  * a specific functionality of a subsystem/class of devices.
61  */
62 struct subsys_interface {
63 	const char *name;
64 	struct bus_type *subsys;
65 	struct list_head node;
66 	int (*add_dev)(struct device *dev, struct subsys_interface *sif);
67 	void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
68 };
69 
70 int subsys_interface_register(struct subsys_interface *sif);
71 void subsys_interface_unregister(struct subsys_interface *sif);
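/*
 * Illustrative sketch (editorial, not part of the original header): a minimal
 * subsys_interface hooked to a hypothetical "foo" bus.  foo_bus_type,
 * foo_vote_add() and foo_vote_remove() are made-up names; the callbacks run
 * for every device already on the bus and for devices added later.
 */
static int foo_iface_add_dev(struct device *dev, struct subsys_interface *sif)
{
	return foo_vote_add(dev);
}

static void foo_iface_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	foo_vote_remove(dev);
}

static struct subsys_interface foo_iface = {
	.name		= "foo_iface",
	.subsys		= &foo_bus_type,
	.add_dev	= foo_iface_add_dev,
	.remove_dev	= foo_iface_remove_dev,
};

/* subsys_interface_register(&foo_iface) attaches it to the subsystem. */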
72 
73 int subsys_system_register(struct bus_type *subsys,
74 			   const struct attribute_group **groups);
75 int subsys_virtual_register(struct bus_type *subsys,
76 			    const struct attribute_group **groups);
77 
78 /*
 79  * The type of device a "struct device" is embedded in. A class
80  * or bus can contain devices of different types
81  * like "partitions" and "disks", "mouse" and "event".
82  * This identifies the device type and carries type-specific
83  * information, equivalent to the kobj_type of a kobject.
84  * If "name" is specified, the uevent will contain it in
85  * the DEVTYPE variable.
86  */
87 struct device_type {
88 	const char *name;
89 	const struct attribute_group **groups;
90 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
91 	char *(*devnode)(struct device *dev, umode_t *mode,
92 			 kuid_t *uid, kgid_t *gid);
93 	void (*release)(struct device *dev);
94 
95 	const struct dev_pm_ops *pm;
96 };
97 
98 /* interface for exporting device attributes */
99 struct device_attribute {
100 	struct attribute	attr;
101 	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
102 			char *buf);
103 	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
104 			 const char *buf, size_t count);
105 };
106 
107 struct dev_ext_attribute {
108 	struct device_attribute attr;
109 	void *var;
110 };
111 
112 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
113 			  char *buf);
114 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
115 			   const char *buf, size_t count);
116 ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
117 			char *buf);
118 ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
119 			 const char *buf, size_t count);
120 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
121 			char *buf);
122 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
123 			 const char *buf, size_t count);
124 
125 #define DEVICE_ATTR(_name, _mode, _show, _store) \
126 	struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
127 #define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
128 	struct device_attribute dev_attr_##_name = \
129 		__ATTR_PREALLOC(_name, _mode, _show, _store)
130 #define DEVICE_ATTR_RW(_name) \
131 	struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
132 #define DEVICE_ATTR_ADMIN_RW(_name) \
133 	struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)
134 #define DEVICE_ATTR_RO(_name) \
135 	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
136 #define DEVICE_ATTR_ADMIN_RO(_name) \
137 	struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)
138 #define DEVICE_ATTR_WO(_name) \
139 	struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
140 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \
141 	struct dev_ext_attribute dev_attr_##_name = \
142 		{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
143 #define DEVICE_INT_ATTR(_name, _mode, _var) \
144 	struct dev_ext_attribute dev_attr_##_name = \
145 		{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
146 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \
147 	struct dev_ext_attribute dev_attr_##_name = \
148 		{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
149 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
150 	struct device_attribute dev_attr_##_name =		\
151 		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
152 
153 int device_create_file(struct device *device,
154 		       const struct device_attribute *entry);
155 void device_remove_file(struct device *dev,
156 			const struct device_attribute *attr);
157 bool device_remove_file_self(struct device *dev,
158 			     const struct device_attribute *attr);
159 int __must_check device_create_bin_file(struct device *dev,
160 					const struct bin_attribute *attr);
161 void device_remove_bin_file(struct device *dev,
162 			    const struct bin_attribute *attr);
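/*
 * Illustrative sketch (editorial, not part of the original header): exporting
 * one read/write sysfs attribute with DEVICE_ATTR_RW() and creating it from a
 * probe path.  struct foo_priv and its delay_ms field are made-up names.
 */
static ssize_t delay_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", priv->delay_ms);
}

static ssize_t delay_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct foo_priv *priv = dev_get_drvdata(dev);
	int ret = kstrtouint(buf, 0, &priv->delay_ms);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(delay);	/* expects delay_show()/delay_store() */

/* In probe: ret = device_create_file(dev, &dev_attr_delay); */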
163 
164 /* device resource management */
165 typedef void (*dr_release_t)(struct device *dev, void *res);
166 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
167 
168 #ifdef CONFIG_DEBUG_DEVRES
169 void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
170 			  int nid, const char *name) __malloc;
171 #define devres_alloc(release, size, gfp) \
172 	__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
173 #define devres_alloc_node(release, size, gfp, nid) \
174 	__devres_alloc_node(release, size, gfp, nid, #release)
175 #else
176 void *devres_alloc_node(dr_release_t release, size_t size,
177 			gfp_t gfp, int nid) __malloc;
178 static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
179 {
180 	return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
181 }
182 #endif
183 
184 void devres_for_each_res(struct device *dev, dr_release_t release,
185 			 dr_match_t match, void *match_data,
186 			 void (*fn)(struct device *, void *, void *),
187 			 void *data);
188 void devres_free(void *res);
189 void devres_add(struct device *dev, void *res);
190 void *devres_find(struct device *dev, dr_release_t release,
191 		  dr_match_t match, void *match_data);
192 void *devres_get(struct device *dev, void *new_res,
193 		 dr_match_t match, void *match_data);
194 void *devres_remove(struct device *dev, dr_release_t release,
195 		    dr_match_t match, void *match_data);
196 int devres_destroy(struct device *dev, dr_release_t release,
197 		   dr_match_t match, void *match_data);
198 int devres_release(struct device *dev, dr_release_t release,
199 		   dr_match_t match, void *match_data);
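/*
 * Illustrative sketch (editorial, not part of the original header): building a
 * custom managed resource from devres_alloc()/devres_add().  foo_hw_enable()
 * and foo_hw_disable() are made-up hardware helpers; the release callback runs
 * automatically when the driver detaches.
 */
struct foo_devres {
	struct foo_hw *hw;
};

static void devm_foo_hw_release(struct device *dev, void *res)
{
	struct foo_devres *dr = res;

	foo_hw_disable(dr->hw);
}

static int devm_foo_hw_enable(struct device *dev, struct foo_hw *hw)
{
	struct foo_devres *dr;
	int ret;

	dr = devres_alloc(devm_foo_hw_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	ret = foo_hw_enable(hw);
	if (ret) {
		devres_free(dr);
		return ret;
	}

	dr->hw = hw;
	devres_add(dev, dr);
	return 0;
}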
200 
201 /* devres group */
202 void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp);
203 void devres_close_group(struct device *dev, void *id);
204 void devres_remove_group(struct device *dev, void *id);
205 int devres_release_group(struct device *dev, void *id);
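/*
 * Illustrative sketch (editorial, not part of the original header): a devres
 * group lets a driver undo just the managed resources acquired during an
 * optional phase.  foo_optional_init() is a made-up helper that uses devm_*
 * allocations internally.
 */
static int foo_try_optional_init(struct device *dev)
{
	void *group = devres_open_group(dev, NULL, GFP_KERNEL);
	int ret;

	if (!group)
		return -ENOMEM;

	ret = foo_optional_init(dev);
	if (ret)
		devres_release_group(dev, group);	/* release only this group */
	else
		devres_close_group(dev, group);

	return ret;
}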
206 
207 /* managed devm_k.alloc/kfree for device drivers */
208 void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
209 void *devm_krealloc(struct device *dev, void *ptr, size_t size,
210 		    gfp_t gfp) __must_check;
211 __printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp,
212 				     const char *fmt, va_list ap) __malloc;
213 __printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp,
214 				    const char *fmt, ...) __malloc;
215 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
216 {
217 	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
218 }
219 static inline void *devm_kmalloc_array(struct device *dev,
220 				       size_t n, size_t size, gfp_t flags)
221 {
222 	size_t bytes;
223 
224 	if (unlikely(check_mul_overflow(n, size, &bytes)))
225 		return NULL;
226 
227 	return devm_kmalloc(dev, bytes, flags);
228 }
229 static inline void *devm_kcalloc(struct device *dev,
230 				 size_t n, size_t size, gfp_t flags)
231 {
232 	return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
233 }
234 void devm_kfree(struct device *dev, const void *p);
235 char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
236 const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
237 void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp);
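/*
 * Illustrative sketch (editorial, not part of the original header): typical
 * use of the managed allocators in a probe path.  struct foo_priv and
 * foo_probe() are made-up names; the memory is freed automatically when the
 * driver detaches, so remove() needs no matching kfree().
 */
static int foo_probe(struct device *dev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->label = devm_kasprintf(dev, GFP_KERNEL, "foo-%s", dev_name(dev));
	if (!priv->label)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	return 0;
}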
238 
239 unsigned long devm_get_free_pages(struct device *dev,
240 				  gfp_t gfp_mask, unsigned int order);
241 void devm_free_pages(struct device *dev, unsigned long addr);
242 
243 void __iomem *devm_ioremap_resource(struct device *dev,
244 				    const struct resource *res);
245 void __iomem *devm_ioremap_resource_wc(struct device *dev,
246 				       const struct resource *res);
247 
248 void __iomem *devm_of_iomap(struct device *dev,
249 			    struct device_node *node, int index,
250 			    resource_size_t *size);
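/*
 * Illustrative sketch (editorial, not part of the original header): mapping an
 * MMIO region with devm_ioremap_resource().  platform_get_resource() comes
 * from <linux/platform_device.h>; foo_pdev_probe() is a made-up name.
 */
static int foo_pdev_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* also covers a missing resource */

	/* use readl()/writel() on base; unmapped automatically on detach */
	return 0;
}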
251 
252 /* allows to add/remove a custom action to devres stack */
253 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
254 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
255 void devm_release_action(struct device *dev, void (*action)(void *), void *data);
256 
257 static inline int devm_add_action_or_reset(struct device *dev,
258 					   void (*action)(void *), void *data)
259 {
260 	int ret;
261 
262 	ret = devm_add_action(dev, action, data);
263 	if (ret)
264 		action(data);
265 
266 	return ret;
267 }
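/*
 * Illustrative sketch (editorial, not part of the original header): tying a
 * manual "undo" step to driver detach with devm_add_action_or_reset().  The
 * regulator calls come from <linux/regulator/consumer.h>; foo_enable_supply()
 * and foo_regulator_disable() are made-up names.
 */
static void foo_regulator_disable(void *data)
{
	regulator_disable(data);
}

static int foo_enable_supply(struct device *dev, struct regulator *supply)
{
	int ret = regulator_enable(supply);

	if (ret)
		return ret;

	/* If adding the action fails, it runs immediately: no manual cleanup. */
	return devm_add_action_or_reset(dev, foo_regulator_disable, supply);
}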
268 
269 /**
270  * devm_alloc_percpu - Resource-managed alloc_percpu
271  * @dev: Device to allocate per-cpu memory for
272  * @type: Type to allocate per-cpu memory for
273  *
274  * Managed alloc_percpu. Per-cpu memory allocated with this function is
275  * automatically freed on driver detach.
276  *
277  * RETURNS:
278  * Pointer to allocated memory on success, NULL on failure.
279  */
280 #define devm_alloc_percpu(dev, type)      \
281 	((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
282 						      __alignof__(type)))
283 
284 void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
285 				   size_t align);
286 void devm_free_percpu(struct device *dev, void __percpu *pdata);
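/*
 * Illustrative sketch (editorial, not part of the original header): per-CPU
 * statistics allocated with devm_alloc_percpu().  struct foo_stats and
 * foo_init_stats() are made-up names.
 */
static int foo_init_stats(struct device *dev, struct foo_priv *priv)
{
	priv->stats = devm_alloc_percpu(dev, struct foo_stats);
	if (!priv->stats)
		return -ENOMEM;

	/* use this_cpu_ptr(priv->stats) in hot paths; freed on detach */
	return 0;
}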
287 
288 struct device_dma_parameters {
289 	/*
290 	 * a low level driver may set these to teach IOMMU code about
291 	 * sg limitations.
292 	 */
293 	unsigned int max_segment_size;
294 	unsigned int min_align_mask;
295 	unsigned long segment_boundary_mask;
296 };
297 
298 /**
299  * enum device_link_state - Device link states.
300  * @DL_STATE_NONE: The presence of the drivers is not being tracked.
301  * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
302  * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
303  * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
304  * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
305  * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
306  */
307 enum device_link_state {
308 	DL_STATE_NONE = -1,
309 	DL_STATE_DORMANT = 0,
310 	DL_STATE_AVAILABLE,
311 	DL_STATE_CONSUMER_PROBE,
312 	DL_STATE_ACTIVE,
313 	DL_STATE_SUPPLIER_UNBIND,
314 };
315 
316 /*
317  * Device link flags.
318  *
319  * STATELESS: The core will not remove this link automatically.
320  * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
321  * PM_RUNTIME: If set, the runtime PM framework will use this link.
322  * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
323  * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
324  * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
325  * MANAGED: The core tracks presence of supplier/consumer drivers (internal).
326  * SYNC_STATE_ONLY: Link only affects sync_state() behavior.
327  */
328 #define DL_FLAG_STATELESS		BIT(0)
329 #define DL_FLAG_AUTOREMOVE_CONSUMER	BIT(1)
330 #define DL_FLAG_PM_RUNTIME		BIT(2)
331 #define DL_FLAG_RPM_ACTIVE		BIT(3)
332 #define DL_FLAG_AUTOREMOVE_SUPPLIER	BIT(4)
333 #define DL_FLAG_AUTOPROBE_CONSUMER	BIT(5)
334 #define DL_FLAG_MANAGED			BIT(6)
335 #define DL_FLAG_SYNC_STATE_ONLY		BIT(7)
336 
337 /**
338  * enum dl_dev_state - Device driver presence tracking information.
339  * @DL_DEV_NO_DRIVER: There is no driver attached to the device.
340  * @DL_DEV_PROBING: A driver is probing.
341  * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
342  * @DL_DEV_UNBINDING: The driver is unbinding from the device.
343  */
344 enum dl_dev_state {
345 	DL_DEV_NO_DRIVER = 0,
346 	DL_DEV_PROBING,
347 	DL_DEV_DRIVER_BOUND,
348 	DL_DEV_UNBINDING,
349 };
350 
351 /**
352  * struct dev_links_info - Device data related to device links.
353  * @suppliers: List of links to supplier devices.
354  * @consumers: List of links to consumer devices.
355  * @needs_suppliers: Hook to global list of devices waiting for suppliers.
356  * @defer_hook: Hook to global list of devices that have deferred sync_state or
357  *		deferred fw_devlink.
358  * @need_for_probe: If needs_suppliers is on a list, this indicates if the
359  *		    suppliers are needed for probe or not.
360  * @status: Driver status information.
361  */
362 struct dev_links_info {
363 	struct list_head suppliers;
364 	struct list_head consumers;
365 	struct list_head needs_suppliers;
366 	struct list_head defer_hook;
367 	bool need_for_probe;
368 	enum dl_dev_state status;
369 };
370 
371 /**
372  * struct device - The basic device structure
373  * @parent:	The device's "parent" device, the device to which it is attached.
374  * 		In most cases, a parent device is some sort of bus or host
 375  * 		controller. If parent is NULL, the device is a top-level device,
376  * 		which is not usually what you want.
377  * @p:		Holds the private data of the driver core portions of the device.
378  * 		See the comment of the struct device_private for detail.
379  * @kobj:	A top-level, abstract class from which other classes are derived.
380  * @init_name:	Initial name of the device.
381  * @type:	The type of device.
382  * 		This identifies the device type and carries type-specific
383  * 		information.
384  * @mutex:	Mutex to synchronize calls to its driver.
385  * @lockdep_mutex: An optional debug lock that a subsystem can use as a
386  * 		peer lock to gain localized lockdep coverage of the device_lock.
387  * @bus:	Type of bus device is on.
 388  * @driver:	Which driver has allocated this device.
389  * @platform_data: Platform data specific to the device.
390  * 		Example: For devices on custom boards, as typical of embedded
391  * 		and SOC based hardware, Linux often uses platform_data to point
392  * 		to board-specific structures describing devices and how they
393  * 		are wired.  That can include what ports are available, chip
394  * 		variants, which GPIO pins act in what additional roles, and so
395  * 		on.  This shrinks the "Board Support Packages" (BSPs) and
396  * 		minimizes board-specific #ifdefs in drivers.
397  * @driver_data: Private pointer for driver specific info.
398  * @links:	Links to suppliers and consumers of this device.
399  * @power:	For device power management.
400  *		See Documentation/driver-api/pm/devices.rst for details.
401  * @pm_domain:	Provide callbacks that are executed during system suspend,
402  * 		hibernation, system resume and during runtime PM transitions
403  * 		along with subsystem-level and driver-level callbacks.
404  * @em_pd:	device's energy model performance domain
405  * @pins:	For device pin management.
406  *		See Documentation/driver-api/pinctl.rst for details.
407  * @msi_list:	Hosts MSI descriptors
408  * @msi_domain: The generic MSI domain this device is using.
409  * @numa_node:	NUMA node this device is close to.
410  * @dma_ops:    DMA mapping operations for this device.
411  * @dma_mask:	Dma mask (if dma'ble device).
 412  * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mappings as not all
 413  * 		hardware supports 64-bit addresses for consistent allocations
 414  * 		of such descriptors.
415  * @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
416  *		DMA limit than the device itself supports.
417  * @dma_range_map: map for DMA memory ranges relative to that of RAM
418  * @dma_parms:	A low level driver may set these to teach IOMMU code about
419  * 		segment limitations.
420  * @dma_pools:	Dma pools (if dma'ble device).
421  * @dma_mem:	Internal for coherent mem override.
422  * @cma_area:	Contiguous memory area for dma allocations
423  * @archdata:	For arch-specific additions.
424  * @of_node:	Associated device tree node.
425  * @fwnode:	Associated device node supplied by platform firmware.
426  * @devt:	For creating the sysfs "dev".
427  * @id:		device instance
428  * @devres_lock: Spinlock to protect the resource of the device.
429  * @devres_head: The resources list of the device.
430  * @knode_class: The node used to add the device to the class list.
431  * @class:	The class of the device.
432  * @groups:	Optional attribute groups.
433  * @release:	Callback to free the device after all references have
434  * 		gone away. This should be set by the allocator of the
435  * 		device (i.e. the bus driver that discovered the device).
436  * @iommu_group: IOMMU group the device belongs to.
437  * @iommu:	Per device generic IOMMU runtime data
438  *
439  * @offline_disabled: If set, the device is permanently online.
440  * @offline:	Set after successful invocation of bus type's .offline().
441  * @of_node_reused: Set if the device-tree node is shared with an ancestor
442  *              device.
443  * @state_synced: The hardware state of this device has been synced to match
444  *		  the software state of this device by calling the driver/bus
445  *		  sync_state() callback.
446  * @dma_coherent: this particular device is dma coherent, even if the
447  *		architecture supports non-coherent devices.
448  * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
449  *		streaming DMA operations (->map_* / ->unmap_* / ->sync_*),
 450  *		and optionally (if the coherent mask is large enough) also
451  *		for dma allocations.  This flag is managed by the dma ops
452  *		instance from ->dma_supported.
453  *
454  * At the lowest level, every device in a Linux system is represented by an
455  * instance of struct device. The device structure contains the information
456  * that the device model core needs to model the system. Most subsystems,
457  * however, track additional information about the devices they host. As a
458  * result, it is rare for devices to be represented by bare device structures;
459  * instead, that structure, like kobject structures, is usually embedded within
460  * a higher-level representation of the device.
461  */
462 struct device {
463 	struct kobject kobj;
464 	struct device		*parent;
465 
466 	struct device_private	*p;
467 
468 	const char		*init_name; /* initial name of the device */
469 	const struct device_type *type;
470 
471 	struct bus_type	*bus;		/* type of bus device is on */
472 	struct device_driver *driver;	/* which driver has allocated this
473 					   device */
474 	void		*platform_data;	/* Platform specific data, device
475 					   core doesn't touch it */
476 	void		*driver_data;	/* Driver data, set and get with
477 					   dev_set_drvdata/dev_get_drvdata */
478 #ifdef CONFIG_PROVE_LOCKING
479 	struct mutex		lockdep_mutex;
480 #endif
481 	struct mutex		mutex;	/* mutex to synchronize calls to
482 					 * its driver.
483 					 */
484 
485 	struct dev_links_info	links;
486 	struct dev_pm_info	power;
487 	struct dev_pm_domain	*pm_domain;
488 
489 #ifdef CONFIG_ENERGY_MODEL
490 	struct em_perf_domain	*em_pd;
491 #endif
492 
493 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
494 	struct irq_domain	*msi_domain;
495 #endif
496 #ifdef CONFIG_PINCTRL
497 	struct dev_pin_info	*pins;
498 #endif
499 #ifdef CONFIG_GENERIC_MSI_IRQ
500 	raw_spinlock_t		msi_lock;
501 	struct list_head	msi_list;
502 #endif
503 #ifdef CONFIG_DMA_OPS
504 	const struct dma_map_ops *dma_ops;
505 #endif
506 	u64		*dma_mask;	/* dma mask (if dma'able device) */
507 	u64		coherent_dma_mask;/* Like dma_mask, but for
508 					     alloc_coherent mappings as
509 					     not all hardware supports
510 					     64 bit addresses for consistent
 511 					     allocations of such descriptors. */
512 	u64		bus_dma_limit;	/* upstream dma constraint */
513 	const struct bus_dma_region *dma_range_map;
514 
515 	struct device_dma_parameters *dma_parms;
516 
517 	struct list_head	dma_pools;	/* dma pools (if dma'ble) */
518 
519 #ifdef CONFIG_DMA_DECLARE_COHERENT
520 	struct dma_coherent_mem	*dma_mem; /* internal for coherent mem
521 					     override */
522 #endif
523 #ifdef CONFIG_DMA_CMA
524 	struct cma *cma_area;		/* contiguous memory area for dma
525 					   allocations */
526 #endif
527 	/* arch specific additions */
528 	struct dev_archdata	archdata;
529 
530 	struct device_node	*of_node; /* associated device tree node */
531 	struct fwnode_handle	*fwnode; /* firmware device node */
532 
533 #ifdef CONFIG_NUMA
534 	int		numa_node;	/* NUMA node this device is close to */
535 #endif
536 	dev_t			devt;	/* dev_t, creates the sysfs "dev" */
537 	u32			id;	/* device instance */
538 
539 	spinlock_t		devres_lock;
540 	struct list_head	devres_head;
541 
542 	struct class		*class;
543 	const struct attribute_group **groups;	/* optional groups */
544 
545 	void	(*release)(struct device *dev);
546 	struct iommu_group	*iommu_group;
547 	struct dev_iommu	*iommu;
548 
549 	bool			offline_disabled:1;
550 	bool			offline:1;
551 	bool			of_node_reused:1;
552 	bool			state_synced:1;
553 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
554     defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
555     defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
556 	bool			dma_coherent:1;
557 #endif
558 #ifdef CONFIG_DMA_OPS_BYPASS
559 	bool			dma_ops_bypass : 1;
560 #endif
561 };
562 
563 /**
564  * struct device_link - Device link representation.
565  * @supplier: The device on the supplier end of the link.
566  * @s_node: Hook to the supplier device's list of links to consumers.
567  * @consumer: The device on the consumer end of the link.
568  * @c_node: Hook to the consumer device's list of links to suppliers.
569  * @link_dev: device used to expose link details in sysfs
570  * @status: The state of the link (with respect to the presence of drivers).
571  * @flags: Link flags.
572  * @rpm_active: Whether or not the consumer device is runtime-PM-active.
573  * @kref: Count repeated addition of the same link.
574  * @rm_work: Work structure used for removing the link.
575  * @supplier_preactivated: Supplier has been made active before consumer probe.
576  */
577 struct device_link {
578 	struct device *supplier;
579 	struct list_head s_node;
580 	struct device *consumer;
581 	struct list_head c_node;
582 	struct device link_dev;
583 	enum device_link_state status;
584 	u32 flags;
585 	refcount_t rpm_active;
586 	struct kref kref;
587 	struct work_struct rm_work;
588 	bool supplier_preactivated; /* Owned by consumer probe. */
589 };
590 
591 static inline struct device *kobj_to_dev(struct kobject *kobj)
592 {
593 	return container_of(kobj, struct device, kobj);
594 }
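/*
 * Illustrative sketch (editorial, not part of the original header): the usual
 * pattern of embedding struct device in a higher-level structure, with a
 * container_of() accessor and a release() callback.  struct foo_device is a
 * made-up type; kfree() requires <linux/slab.h>.
 */
struct foo_device {
	struct device dev;
	int id;
};

#define to_foo_device(d) container_of(d, struct foo_device, dev)

static void foo_device_release(struct device *dev)
{
	kfree(to_foo_device(dev));	/* the final put_device() ends up here */
}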
595 
596 /**
597  * device_iommu_mapped - Returns true when the device DMA is translated
598  *			 by an IOMMU
599  * @dev: Device to perform the check on
600  */
601 static inline bool device_iommu_mapped(struct device *dev)
602 {
603 	return (dev->iommu_group != NULL);
604 }
605 
606 /* Get the wakeup routines, which depend on struct device */
607 #include <linux/pm_wakeup.h>
608 
609 static inline const char *dev_name(const struct device *dev)
610 {
611 	/* Use the init name until the kobject becomes available */
612 	if (dev->init_name)
613 		return dev->init_name;
614 
615 	return kobject_name(&dev->kobj);
616 }
617 
618 /**
619  * dev_bus_name - Return a device's bus/class name, if at all possible
620  * @dev: struct device to get the bus/class name of
621  *
622  * Will return the name of the bus/class the device is attached to.  If it is
623  * not attached to a bus/class, an empty string will be returned.
624  */
625 static inline const char *dev_bus_name(const struct device *dev)
626 {
627 	return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
628 }
629 
630 __printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);
631 
632 #ifdef CONFIG_NUMA
633 static inline int dev_to_node(struct device *dev)
634 {
635 	return dev->numa_node;
636 }
637 static inline void set_dev_node(struct device *dev, int node)
638 {
639 	dev->numa_node = node;
640 }
641 #else
642 static inline int dev_to_node(struct device *dev)
643 {
644 	return NUMA_NO_NODE;
645 }
646 static inline void set_dev_node(struct device *dev, int node)
647 {
648 }
649 #endif
650 
651 static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
652 {
653 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
654 	return dev->msi_domain;
655 #else
656 	return NULL;
657 #endif
658 }
659 
660 static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
661 {
662 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
663 	dev->msi_domain = d;
664 #endif
665 }
666 
667 static inline void *dev_get_drvdata(const struct device *dev)
668 {
669 	return dev->driver_data;
670 }
671 
672 static inline void dev_set_drvdata(struct device *dev, void *data)
673 {
674 	dev->driver_data = data;
675 }
676 
677 static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
678 {
679 	return dev ? dev->power.subsys_data : NULL;
680 }
681 
682 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
683 {
684 	return dev->kobj.uevent_suppress;
685 }
686 
687 static inline void dev_set_uevent_suppress(struct device *dev, int val)
688 {
689 	dev->kobj.uevent_suppress = val;
690 }
691 
692 static inline int device_is_registered(struct device *dev)
693 {
694 	return dev->kobj.state_in_sysfs;
695 }
696 
697 static inline void device_enable_async_suspend(struct device *dev)
698 {
699 	if (!dev->power.is_prepared)
700 		dev->power.async_suspend = true;
701 }
702 
703 static inline void device_disable_async_suspend(struct device *dev)
704 {
705 	if (!dev->power.is_prepared)
706 		dev->power.async_suspend = false;
707 }
708 
709 static inline bool device_async_suspend_enabled(struct device *dev)
710 {
711 	return !!dev->power.async_suspend;
712 }
713 
714 static inline bool device_pm_not_required(struct device *dev)
715 {
716 	return dev->power.no_pm;
717 }
718 
719 static inline void device_set_pm_not_required(struct device *dev)
720 {
721 	dev->power.no_pm = true;
722 }
723 
724 static inline void dev_pm_syscore_device(struct device *dev, bool val)
725 {
726 #ifdef CONFIG_PM_SLEEP
727 	dev->power.syscore = val;
728 #endif
729 }
730 
731 static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags)
732 {
733 	dev->power.driver_flags = flags;
734 }
735 
736 static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags)
737 {
738 	return !!(dev->power.driver_flags & flags);
739 }
740 
741 static inline void device_lock(struct device *dev)
742 {
743 	mutex_lock(&dev->mutex);
744 }
745 
746 static inline int device_lock_interruptible(struct device *dev)
747 {
748 	return mutex_lock_interruptible(&dev->mutex);
749 }
750 
751 static inline int device_trylock(struct device *dev)
752 {
753 	return mutex_trylock(&dev->mutex);
754 }
755 
756 static inline void device_unlock(struct device *dev)
757 {
758 	mutex_unlock(&dev->mutex);
759 }
760 
761 static inline void device_lock_assert(struct device *dev)
762 {
763 	lockdep_assert_held(&dev->mutex);
764 }
765 
766 static inline struct device_node *dev_of_node(struct device *dev)
767 {
768 	if (!IS_ENABLED(CONFIG_OF) || !dev)
769 		return NULL;
770 	return dev->of_node;
771 }
772 
773 static inline bool dev_has_sync_state(struct device *dev)
774 {
775 	if (!dev)
776 		return false;
777 	if (dev->driver && dev->driver->sync_state)
778 		return true;
779 	if (dev->bus && dev->bus->sync_state)
780 		return true;
781 	return false;
782 }
783 
784 /*
785  * High level routines for use by the bus drivers
786  */
787 int __must_check device_register(struct device *dev);
788 void device_unregister(struct device *dev);
789 void device_initialize(struct device *dev);
790 int __must_check device_add(struct device *dev);
791 void device_del(struct device *dev);
792 int device_for_each_child(struct device *dev, void *data,
793 			  int (*fn)(struct device *dev, void *data));
794 int device_for_each_child_reverse(struct device *dev, void *data,
795 				  int (*fn)(struct device *dev, void *data));
796 struct device *device_find_child(struct device *dev, void *data,
797 				 int (*match)(struct device *dev, void *data));
798 struct device *device_find_child_by_name(struct device *parent,
799 					 const char *name);
800 int device_rename(struct device *dev, const char *new_name);
801 int device_move(struct device *dev, struct device *new_parent,
802 		enum dpm_order dpm_order);
803 int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
804 const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
805 			       kgid_t *gid, const char **tmp);
806 int device_is_dependent(struct device *dev, void *target);
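/*
 * Illustrative sketch (editorial, not part of the original header): the
 * two-step device_initialize()/device_add() form, reusing the made-up
 * struct foo_device, foo_device_release() and foo_bus_type from the sketches
 * above.  On device_add() failure the initial reference must still be dropped.
 */
static int foo_device_register(struct foo_device *fdev, struct device *parent)
{
	int ret;

	device_initialize(&fdev->dev);
	fdev->dev.parent  = parent;
	fdev->dev.bus     = &foo_bus_type;
	fdev->dev.release = foo_device_release;
	dev_set_name(&fdev->dev, "foo%d", fdev->id);

	ret = device_add(&fdev->dev);
	if (ret)
		put_device(&fdev->dev);	/* frees fdev via the release callback */

	return ret;
}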
807 
808 static inline bool device_supports_offline(struct device *dev)
809 {
810 	return dev->bus && dev->bus->offline && dev->bus->online;
811 }
812 
813 void lock_device_hotplug(void);
814 void unlock_device_hotplug(void);
815 int lock_device_hotplug_sysfs(void);
816 int device_offline(struct device *dev);
817 int device_online(struct device *dev);
818 void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
819 void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
820 void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
821 
822 static inline int dev_num_vf(struct device *dev)
823 {
824 	if (dev->bus && dev->bus->num_vf)
825 		return dev->bus->num_vf(dev);
826 	return 0;
827 }
828 
829 /*
830  * Root device objects for grouping under /sys/devices
831  */
832 struct device *__root_device_register(const char *name, struct module *owner);
833 
834 /* This is a macro to avoid include problems with THIS_MODULE */
835 #define root_device_register(name) \
836 	__root_device_register(name, THIS_MODULE)
837 
838 void root_device_unregister(struct device *root);
839 
840 static inline void *dev_get_platdata(const struct device *dev)
841 {
842 	return dev->platform_data;
843 }
844 
845 /*
846  * Manual binding of a device to driver. See drivers/base/bus.c
847  * for information on use.
848  */
849 int __must_check device_bind_driver(struct device *dev);
850 void device_release_driver(struct device *dev);
851 int  __must_check device_attach(struct device *dev);
852 int __must_check driver_attach(struct device_driver *drv);
853 void device_initial_probe(struct device *dev);
854 int __must_check device_reprobe(struct device *dev);
855 
856 bool device_is_bound(struct device *dev);
857 
858 /*
859  * Easy functions for dynamically creating devices on the fly
860  */
861 __printf(5, 6) struct device *
862 device_create(struct class *cls, struct device *parent, dev_t devt,
863 	      void *drvdata, const char *fmt, ...);
864 __printf(6, 7) struct device *
865 device_create_with_groups(struct class *cls, struct device *parent, dev_t devt,
866 			  void *drvdata, const struct attribute_group **groups,
867 			  const char *fmt, ...);
868 void device_destroy(struct class *cls, dev_t devt);
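/*
 * Illustrative sketch (editorial, not part of the original header): creating a
 * class device for a character-device minor.  foo_class, foo_devt and
 * struct foo_priv are made-up names; MKDEV()/MAJOR() come from <linux/kdev_t.h>.
 */
static int foo_add_chardev_node(struct foo_priv *priv, int minor)
{
	struct device *dev;

	dev = device_create(foo_class, NULL, MKDEV(MAJOR(foo_devt), minor),
			    priv, "foo%d", minor);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	priv->dev = dev;	/* undo with device_destroy(foo_class, dev->devt) */
	return 0;
}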
869 
870 int __must_check device_add_groups(struct device *dev,
871 				   const struct attribute_group **groups);
872 void device_remove_groups(struct device *dev,
873 			  const struct attribute_group **groups);
874 
875 static inline int __must_check device_add_group(struct device *dev,
876 					const struct attribute_group *grp)
877 {
878 	const struct attribute_group *groups[] = { grp, NULL };
879 
880 	return device_add_groups(dev, groups);
881 }
882 
883 static inline void device_remove_group(struct device *dev,
884 				       const struct attribute_group *grp)
885 {
886 	const struct attribute_group *groups[] = { grp, NULL };
887 
888 	return device_remove_groups(dev, groups);
889 }
890 
891 int __must_check devm_device_add_groups(struct device *dev,
892 					const struct attribute_group **groups);
893 void devm_device_remove_groups(struct device *dev,
894 			       const struct attribute_group **groups);
895 int __must_check devm_device_add_group(struct device *dev,
896 				       const struct attribute_group *grp);
897 void devm_device_remove_group(struct device *dev,
898 			      const struct attribute_group *grp);
899 
900 /*
 901  * Platform "fixup" functions - allow the platform to have its say
902  * about devices and actions that the general device layer doesn't
903  * know about.
904  */
905 /* Notify platform of device discovery */
906 extern int (*platform_notify)(struct device *dev);
907 
908 extern int (*platform_notify_remove)(struct device *dev);
909 
910 
911 /*
912  * get_device - atomically increment the reference count for the device.
913  *
914  */
915 struct device *get_device(struct device *dev);
916 void put_device(struct device *dev);
917 bool kill_device(struct device *dev);
918 
919 #ifdef CONFIG_DEVTMPFS
920 int devtmpfs_mount(void);
921 #else
922 static inline int devtmpfs_mount(void) { return 0; }
923 #endif
924 
925 /* drivers/base/power/shutdown.c */
926 void device_shutdown(void);
927 
928 /* debugging and troubleshooting/diagnostic helpers. */
929 const char *dev_driver_string(const struct device *dev);
930 
931 /* Device links interface. */
932 struct device_link *device_link_add(struct device *consumer,
933 				    struct device *supplier, u32 flags);
934 void device_link_del(struct device_link *link);
935 void device_link_remove(void *consumer, struct device *supplier);
936 void device_links_supplier_sync_state_pause(void);
937 void device_links_supplier_sync_state_resume(void);
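/*
 * Illustrative sketch (editorial, not part of the original header): a consumer
 * probe creating a managed link to its supplier so the supplier is
 * runtime-resumed when the consumer is active and unbound only after the
 * consumer.  In practice the supplier is usually looked up via firmware
 * (OF/ACPI) references; foo_link_to_supplier() is a made-up name.
 */
static int foo_link_to_supplier(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_PM_RUNTIME);
	if (!link)
		return -EINVAL;	/* e.g. the link would create a dependency cycle */

	return 0;
}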
938 
939 extern __printf(3, 4)
940 int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
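/*
 * Illustrative sketch (editorial, not part of the original header):
 * dev_err_probe() logs real errors but keeps -EPROBE_DEFER quiet, recording
 * the reason for the deferral instead.  foo_get_clock() and struct foo_priv
 * are made-up names; the helper is assumed to return an ERR_PTR on failure.
 */
static int foo_probe_clock(struct device *dev, struct foo_priv *priv)
{
	priv->clk = foo_get_clock(dev);
	if (IS_ERR(priv->clk))
		return dev_err_probe(dev, PTR_ERR(priv->clk),
				     "failed to get clock\n");

	return 0;
}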
941 
942 /* Create alias, so I can be autoloaded. */
943 #define MODULE_ALIAS_CHARDEV(major,minor) \
944 	MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
945 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \
946 	MODULE_ALIAS("char-major-" __stringify(major) "-*")
947 
948 #ifdef CONFIG_SYSFS_DEPRECATED
949 extern long sysfs_deprecated;
950 #else
951 #define sysfs_deprecated 0
952 #endif
953 
954 #endif /* _DEVICE_H_ */
955