1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *	pci.h
4  *
5  *	PCI defines and function prototypes
6  *	Copyright 1994, Drew Eckhardt
7  *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
8  *
9  *	PCI Express ASPM defines and function prototypes
10  *	Copyright (c) 2007 Intel Corp.
11  *		Zhang Yanmin (yanmin.zhang@intel.com)
12  *		Shaohua Li (shaohua.li@intel.com)
13  *
14  *	For more information, please consult the following manuals (look at
15  *	http://www.pcisig.com/ for how to get them):
16  *
17  *	PCI BIOS Specification
18  *	PCI Local Bus Specification
19  *	PCI to PCI Bridge Specification
20  *	PCI Express Specification
21  *	PCI System Design Guide
22  */
23 #ifndef LINUX_PCI_H
24 #define LINUX_PCI_H
25 
26 #include <linux/args.h>
27 #include <linux/mod_devicetable.h>
28 
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/ioport.h>
32 #include <linux/list.h>
33 #include <linux/compiler.h>
34 #include <linux/errno.h>
35 #include <linux/kobject.h>
36 #include <linux/atomic.h>
37 #include <linux/device.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/resource_ext.h>
41 #include <linux/msi_api.h>
42 #include <uapi/linux/pci.h>
43 
44 #include <linux/pci_ids.h>
45 #include <linux/android_kabi.h>
46 
47 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY  | \
48 			       PCI_STATUS_SIG_SYSTEM_ERROR | \
49 			       PCI_STATUS_REC_MASTER_ABORT | \
50 			       PCI_STATUS_REC_TARGET_ABORT | \
51 			       PCI_STATUS_SIG_TARGET_ABORT | \
52 			       PCI_STATUS_PARITY)
53 
54 /* Number of reset methods used in pci_reset_fn_methods array in pci.c */
55 #define PCI_NUM_RESET_METHODS 7
56 
57 #define PCI_RESET_PROBE		true
58 #define PCI_RESET_DO_RESET	false
59 
60 /*
61  * The PCI interface treats multi-function devices as independent
62  * devices.  The slot/function address of each device is encoded
63  * in a single byte as follows:
64  *
65  *	7:3 = slot
66  *	2:0 = function
67  *
68  * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
69  * In the interest of not exposing interfaces to user-space unnecessarily,
70  * the following kernel-only defines are being added here.
71  */
72 #define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
73 /* return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
74 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
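
/*
 * Example (editorial illustration): encoding and decoding the address of the
 * device at bus 0x3a, slot 0x1c, function 2 with the macros above and the
 * PCI_DEVFN()/PCI_SLOT()/PCI_FUNC() helpers from uapi/linux/pci.h:
 *
 *	u8  devfn = PCI_DEVFN(0x1c, 2);		slot in bits 7:3, fn in 2:0 -> 0xe2
 *	u16 devid = PCI_DEVID(0x3a, devfn);	bus number in the high byte -> 0x3ae2
 *
 *	PCI_SLOT(devfn) == 0x1c, PCI_FUNC(devfn) == 2, PCI_BUS_NUM(devid) == 0x3a
 */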
75 
76 /* pci_slot represents a physical slot */
77 struct pci_slot {
78 	struct pci_bus		*bus;		/* Bus this slot is on */
79 	struct list_head	list;		/* Node in list of slots */
80 	struct hotplug_slot	*hotplug;	/* Hotplug info (move here) */
81 	unsigned char		number;		/* PCI_SLOT(pci_dev->devfn) */
82 	struct kobject		kobj;
83 };
84 
85 static inline const char *pci_slot_name(const struct pci_slot *slot)
86 {
87 	return kobject_name(&slot->kobj);
88 }
89 
90 /* File state for mmap()s on /proc/bus/pci/X/Y */
91 enum pci_mmap_state {
92 	pci_mmap_io,
93 	pci_mmap_mem
94 };
95 
96 /* For PCI devices, the region numbers are assigned this way: */
97 enum {
98 	/* #0-5: standard PCI resources */
99 	PCI_STD_RESOURCES,
100 	PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,
101 
102 	/* #6: expansion ROM resource */
103 	PCI_ROM_RESOURCE,
104 
105 	/* Device-specific resources */
106 #ifdef CONFIG_PCI_IOV
107 	PCI_IOV_RESOURCES,
108 	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
109 #endif
110 
111 /* PCI-to-PCI (P2P) bridge windows */
112 #define PCI_BRIDGE_IO_WINDOW		(PCI_BRIDGE_RESOURCES + 0)
113 #define PCI_BRIDGE_MEM_WINDOW		(PCI_BRIDGE_RESOURCES + 1)
114 #define PCI_BRIDGE_PREF_MEM_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
115 
116 /* CardBus bridge windows */
117 #define PCI_CB_BRIDGE_IO_0_WINDOW	(PCI_BRIDGE_RESOURCES + 0)
118 #define PCI_CB_BRIDGE_IO_1_WINDOW	(PCI_BRIDGE_RESOURCES + 1)
119 #define PCI_CB_BRIDGE_MEM_0_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
120 #define PCI_CB_BRIDGE_MEM_1_WINDOW	(PCI_BRIDGE_RESOURCES + 3)
121 
122 /* Total number of bridge resources for P2P and CardBus */
123 #define PCI_BRIDGE_RESOURCE_NUM 4
124 
125 	/* Resources assigned to buses behind the bridge */
126 	PCI_BRIDGE_RESOURCES,
127 	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
128 				  PCI_BRIDGE_RESOURCE_NUM - 1,
129 
130 	/* Total resources associated with a PCI device */
131 	PCI_NUM_RESOURCES,
132 
133 	/* Preserve this for compatibility */
134 	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
135 };
136 
137 /**
138  * enum pci_interrupt_pin - PCI INTx interrupt values
139  * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
140  * @PCI_INTERRUPT_INTA: PCI INTA pin
141  * @PCI_INTERRUPT_INTB: PCI INTB pin
142  * @PCI_INTERRUPT_INTC: PCI INTC pin
143  * @PCI_INTERRUPT_INTD: PCI INTD pin
144  *
145  * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
146  * PCI_INTERRUPT_PIN register.
147  */
148 enum pci_interrupt_pin {
149 	PCI_INTERRUPT_UNKNOWN,
150 	PCI_INTERRUPT_INTA,
151 	PCI_INTERRUPT_INTB,
152 	PCI_INTERRUPT_INTC,
153 	PCI_INTERRUPT_INTD,
154 };
155 
156 /* The number of legacy PCI INTx interrupts */
157 #define PCI_NUM_INTX	4
158 
159 /*
160  * Reading from a device that doesn't respond typically returns ~0.  A
161  * successful read from a device may also return ~0, so you need additional
162  * information to reliably identify errors.
163  */
164 #define PCI_ERROR_RESPONSE		(~0ULL)
165 #define PCI_SET_ERROR_RESPONSE(val)	(*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
166 #define PCI_POSSIBLE_ERROR(val)		((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
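
/*
 * Example (editorial illustration): flagging a possibly failed config read.
 * Because ~0 can also be valid data, PCI_POSSIBLE_ERROR() only says the read
 * *may* have failed:
 *
 *	u32 val;
 *
 *	pci_read_config_dword(pdev, PCI_VENDOR_ID, &val);
 *	if (PCI_POSSIBLE_ERROR(val))
 *		return -ENODEV;
 */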
167 
168 /*
169  * pci_power_t values must match the bits in the Capabilities PME_Support
170  * and Control/Status PowerState fields in the Power Management capability.
171  */
172 typedef int __bitwise pci_power_t;
173 
174 #define PCI_D0		((pci_power_t __force) 0)
175 #define PCI_D1		((pci_power_t __force) 1)
176 #define PCI_D2		((pci_power_t __force) 2)
177 #define PCI_D3hot	((pci_power_t __force) 3)
178 #define PCI_D3cold	((pci_power_t __force) 4)
179 #define PCI_UNKNOWN	((pci_power_t __force) 5)
180 #define PCI_POWER_ERROR	((pci_power_t __force) -1)
181 
182 /* Remember to update this when the list above changes! */
183 extern const char *pci_power_names[];
184 
185 static inline const char *pci_power_name(pci_power_t state)
186 {
187 	return pci_power_names[1 + (__force int) state];
188 }
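
/*
 * Editorial note: the "1 +" offset accounts for PCI_POWER_ERROR being -1, so
 * PCI_POWER_ERROR maps to the first entry of pci_power_names[] and PCI_D0 to
 * the second.
 */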
189 
190 /**
191  * typedef pci_channel_state_t
192  *
193  * The pci_channel state describes connectivity between the CPU and
194  * the PCI device.  If some PCI bus between here and the PCI device
195  * has crashed or locked up, this info is reflected here.
196  */
197 typedef unsigned int __bitwise pci_channel_state_t;
198 
199 enum {
200 	/* I/O channel is in normal state */
201 	pci_channel_io_normal = (__force pci_channel_state_t) 1,
202 
203 	/* I/O to channel is blocked */
204 	pci_channel_io_frozen = (__force pci_channel_state_t) 2,
205 
206 	/* PCI card is dead */
207 	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
208 };
209 
210 typedef unsigned int __bitwise pcie_reset_state_t;
211 
212 enum pcie_reset_state {
213 	/* Reset is NOT asserted (Use to deassert reset) */
214 	pcie_deassert_reset = (__force pcie_reset_state_t) 1,
215 
216 	/* Use #PERST to reset PCIe device */
217 	pcie_warm_reset = (__force pcie_reset_state_t) 2,
218 
219 	/* Use PCIe Hot Reset to reset device */
220 	pcie_hot_reset = (__force pcie_reset_state_t) 3
221 };
222 
223 typedef unsigned short __bitwise pci_dev_flags_t;
224 enum pci_dev_flags {
225 	/* INTX_DISABLE in PCI_COMMAND register disables MSI too */
226 	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
227 	/* Device configuration is irrevocably lost if disabled into D3 */
228 	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
229 	/* Provide indication device is assigned by a Virtual Machine Manager */
230 	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
231 	/* Flag for quirk use to store if quirk-specific ACS is enabled */
232 	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
233 	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
234 	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
235 	/* Do not use bus resets for device */
236 	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
237 	/* Do not use PM reset even if device advertises NoSoftRst- */
238 	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
239 	/* Get VPD from function 0 VPD */
240 	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
241 	/* A non-root bridge where translation occurs, stop alias search here */
242 	PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
243 	/* Do not use FLR even if device advertises PCI_AF_CAP */
244 	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
245 	/* Don't use Relaxed Ordering for TLPs directed at this device */
246 	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
247 	/* Device does honor MSI masking despite saying otherwise */
248 	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
249 };
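
/*
 * Example (editorial illustration, modelled on quirks in drivers/pci/quirks.c):
 * a fixup can set one of the flags above on a misbehaving device, e.g. to keep
 * the core from using a bus reset on it:
 *
 *	static void quirk_no_bus_reset(struct pci_dev *dev)
 *	{
 *		dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
 *	}
 *	DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
 */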
250 
251 enum pci_irq_reroute_variant {
252 	INTEL_IRQ_REROUTE_VARIANT = 1,
253 	MAX_IRQ_REROUTE_VARIANTS = 3
254 };
255 
256 typedef unsigned short __bitwise pci_bus_flags_t;
257 enum pci_bus_flags {
258 	PCI_BUS_FLAGS_NO_MSI	= (__force pci_bus_flags_t) 1,
259 	PCI_BUS_FLAGS_NO_MMRBC	= (__force pci_bus_flags_t) 2,
260 	PCI_BUS_FLAGS_NO_AERSID	= (__force pci_bus_flags_t) 4,
261 	PCI_BUS_FLAGS_NO_EXTCFG	= (__force pci_bus_flags_t) 8,
262 };
263 
264 /* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
265 enum pcie_link_width {
266 	PCIE_LNK_WIDTH_RESRV	= 0x00,
267 	PCIE_LNK_X1		= 0x01,
268 	PCIE_LNK_X2		= 0x02,
269 	PCIE_LNK_X4		= 0x04,
270 	PCIE_LNK_X8		= 0x08,
271 	PCIE_LNK_X12		= 0x0c,
272 	PCIE_LNK_X16		= 0x10,
273 	PCIE_LNK_X32		= 0x20,
274 	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,
275 };
276 
277 /* See matching string table in pci_speed_string() */
278 enum pci_bus_speed {
279 	PCI_SPEED_33MHz			= 0x00,
280 	PCI_SPEED_66MHz			= 0x01,
281 	PCI_SPEED_66MHz_PCIX		= 0x02,
282 	PCI_SPEED_100MHz_PCIX		= 0x03,
283 	PCI_SPEED_133MHz_PCIX		= 0x04,
284 	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
285 	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
286 	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
287 	PCI_SPEED_66MHz_PCIX_266	= 0x09,
288 	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
289 	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
290 	AGP_UNKNOWN			= 0x0c,
291 	AGP_1X				= 0x0d,
292 	AGP_2X				= 0x0e,
293 	AGP_4X				= 0x0f,
294 	AGP_8X				= 0x10,
295 	PCI_SPEED_66MHz_PCIX_533	= 0x11,
296 	PCI_SPEED_100MHz_PCIX_533	= 0x12,
297 	PCI_SPEED_133MHz_PCIX_533	= 0x13,
298 	PCIE_SPEED_2_5GT		= 0x14,
299 	PCIE_SPEED_5_0GT		= 0x15,
300 	PCIE_SPEED_8_0GT		= 0x16,
301 	PCIE_SPEED_16_0GT		= 0x17,
302 	PCIE_SPEED_32_0GT		= 0x18,
303 	PCIE_SPEED_64_0GT		= 0x19,
304 	PCI_SPEED_UNKNOWN		= 0xff,
305 };
306 
307 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
308 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
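
/*
 * Example (editorial illustration): querying what a device's link is capable
 * of, e.g. to warn when a fast card sits in a slow slot:
 *
 *	enum pci_bus_speed speed = pcie_get_speed_cap(pdev);
 *	enum pcie_link_width width = pcie_get_width_cap(pdev);
 *
 *	if (speed < PCIE_SPEED_8_0GT || width < PCIE_LNK_X8)
 *		pci_info(pdev, "link is slower/narrower than the device supports\n");
 *
 * pcie_print_link_status(), declared further down, performs a similar
 * comparison against the bandwidth actually available upstream.
 */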
309 
310 struct pci_vpd {
311 	struct mutex	lock;
312 	unsigned int	len;
313 	u8		cap;
314 };
315 
316 struct irq_affinity;
317 struct pcie_link_state;
318 struct pci_sriov;
319 struct pci_p2pdma;
320 struct rcec_ea;
321 
322 /* The pci_dev structure describes PCI devices */
323 struct pci_dev {
324 	struct list_head bus_list;	/* Node in per-bus list */
325 	struct pci_bus	*bus;		/* Bus this device is on */
326 	struct pci_bus	*subordinate;	/* Bus this device bridges to */
327 
328 	void		*sysdata;	/* Hook for sys-specific extension */
329 	struct proc_dir_entry *procent;	/* Device entry in /proc/bus/pci */
330 	struct pci_slot	*slot;		/* Physical slot this device is in */
331 
332 	unsigned int	devfn;		/* Encoded device & function index */
333 	unsigned short	vendor;
334 	unsigned short	device;
335 	unsigned short	subsystem_vendor;
336 	unsigned short	subsystem_device;
337 	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
338 	u8		revision;	/* PCI revision, low byte of class word */
339 	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
340 #ifdef CONFIG_PCIEAER
341 	u16		aer_cap;	/* AER capability offset */
342 	struct aer_stats *aer_stats;	/* AER stats for this device */
343 #endif
344 #ifdef CONFIG_PCIEPORTBUS
345 	struct rcec_ea	*rcec_ea;	/* RCEC cached endpoint association */
346 	struct pci_dev  *rcec;          /* Associated RCEC device */
347 #endif
348 	u32		devcap;		/* PCIe Device Capabilities */
349 	u8		pcie_cap;	/* PCIe capability offset */
350 	u8		msi_cap;	/* MSI capability offset */
351 	u8		msix_cap;	/* MSI-X capability offset */
352 	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
353 	u8		rom_base_reg;	/* Config register controlling ROM */
354 	u8		pin;		/* Interrupt pin this device uses */
355 	u16		pcie_flags_reg;	/* Cached PCIe Capabilities Register */
356 	unsigned long	*dma_alias_mask;/* Mask of enabled devfn aliases */
357 
358 	struct pci_driver *driver;	/* Driver bound to this device */
359 	u64		dma_mask;	/* Mask of the bits of bus address this
360 					   device implements.  Normally this is
361 					   0xffffffff.  You only need to change
362 					   this if your device has broken DMA
363 					   or supports 64-bit transfers.  */
364 
365 	struct device_dma_parameters dma_parms;
366 
367 	pci_power_t	current_state;	/* Current operating state. In ACPI,
368 					   this is D0-D3, D0 being fully
369 					   functional, and D3 being off. */
370 	u8		pm_cap;		/* PM capability offset */
371 	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
372 	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
373 					   can be generated */
374 	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
375 	unsigned int	d1_support:1;	/* Low power state D1 is supported */
376 	unsigned int	d2_support:1;	/* Low power state D2 is supported */
377 	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
378 	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
379 	unsigned int	bridge_d3:1;	/* Allow D3 for bridge */
380 	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
381 	unsigned int	mmio_always_on:1;	/* Disallow turning off io/mem
382 						   decoding during BAR sizing */
383 	unsigned int	wakeup_prepared:1;
384 	unsigned int	skip_bus_pm:1;	/* Internal: Skip bus-level PM */
385 	unsigned int	ignore_hotplug:1;	/* Ignore hotplug events */
386 	unsigned int	hotplug_user_indicators:1; /* SlotCtl indicators
387 						      controlled exclusively by
388 						      user sysfs */
389 	unsigned int	clear_retrain_link:1;	/* Need to clear Retrain Link
390 						   bit manually */
391 	unsigned int	d3hot_delay;	/* D3hot->D0 transition time in ms */
392 	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */
393 
394 #ifdef CONFIG_PCIEASPM
395 	struct pcie_link_state	*link_state;	/* ASPM link state */
396 	u16		l1ss;		/* L1SS Capability pointer */
397 	unsigned int	ltr_path:1;	/* Latency Tolerance Reporting
398 					   supported from root to here */
399 #endif
400 	unsigned int	pasid_no_tlp:1;		/* PASID works without TLP Prefix */
401 	unsigned int	eetlp_prefix_path:1;	/* End-to-End TLP Prefix */
402 
403 	pci_channel_state_t error_state;	/* Current connectivity state */
404 	struct device	dev;			/* Generic device interface */
405 
406 	int		cfg_size;		/* Size of config space */
407 
408 	/*
409 	 * Instead of touching interrupt line and base address registers
410 	 * directly, use the values stored here. They might be different!
411 	 */
412 	unsigned int	irq;
413 	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
414 	struct resource driver_exclusive_resource;	 /* driver exclusive resource ranges */
415 
416 	bool		match_driver;		/* Skip attaching driver */
417 
418 	unsigned int	transparent:1;		/* Subtractive decode bridge */
419 	unsigned int	io_window:1;		/* Bridge has I/O window */
420 	unsigned int	pref_window:1;		/* Bridge has pref mem window */
421 	unsigned int	pref_64_window:1;	/* Pref mem window is 64-bit */
422 	unsigned int	multifunction:1;	/* Multi-function device */
423 
424 	unsigned int	is_busmaster:1;		/* Is busmaster */
425 	unsigned int	no_msi:1;		/* May not use MSI */
426 	unsigned int	no_64bit_msi:1;		/* May only use 32-bit MSIs */
427 	unsigned int	block_cfg_access:1;	/* Config space access blocked */
428 	unsigned int	broken_parity_status:1;	/* Generates false positive parity */
429 	unsigned int	irq_reroute_variant:2;	/* Needs IRQ rerouting variant */
430 	unsigned int	msi_enabled:1;
431 	unsigned int	msix_enabled:1;
432 	unsigned int	ari_enabled:1;		/* ARI forwarding */
433 	unsigned int	ats_enabled:1;		/* Address Translation Svc */
434 	unsigned int	pasid_enabled:1;	/* Process Address Space ID */
435 	unsigned int	pri_enabled:1;		/* Page Request Interface */
436 	unsigned int	is_managed:1;		/* Managed via devres */
437 	unsigned int	is_msi_managed:1;	/* MSI release via devres installed */
438 	unsigned int	needs_freset:1;		/* Requires fundamental reset */
439 	unsigned int	state_saved:1;
440 	unsigned int	is_physfn:1;
441 	unsigned int	is_virtfn:1;
442 	unsigned int	is_hotplug_bridge:1;
443 	unsigned int	shpc_managed:1;		/* SHPC owned by shpchp */
444 	unsigned int	is_thunderbolt:1;	/* Thunderbolt controller */
445 	/*
446 	 * Devices marked being untrusted are the ones that can potentially
447 	 * execute DMA attacks and similar. They are typically connected
448 	 * through external ports such as Thunderbolt but not limited to
449 	 * that. When an IOMMU is enabled they should be getting full
450 	 * mappings to make sure they cannot access arbitrary memory.
451 	 */
452 	unsigned int	untrusted:1;
453 	/*
454 	 * Info from the platform, e.g., ACPI or device tree, may mark a
455 	 * device as "external-facing".  An external-facing device is
456 	 * itself internal but devices downstream from it are external.
457 	 */
458 	unsigned int	external_facing:1;
459 	unsigned int	broken_intx_masking:1;	/* INTx masking can't be used */
460 	unsigned int	io_window_1k:1;		/* Intel bridge 1K I/O windows */
461 	unsigned int	irq_managed:1;
462 	unsigned int	non_compliant_bars:1;	/* Broken BARs; ignore them */
463 	unsigned int	is_probed:1;		/* Device probing in progress */
464 	unsigned int	link_active_reporting:1;/* Device capable of reporting link active */
465 	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
466 	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
467 	unsigned int	rom_bar_overlap:1;	/* ROM BAR disable broken */
468 	unsigned int	rom_attr_enabled:1;	/* Display of ROM attribute enabled? */
469 	pci_dev_flags_t dev_flags;
470 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
471 
472 	spinlock_t	pcie_cap_lock;		/* Protects RMW ops in capability accessors */
473 	u32		saved_config_space[16]; /* Config space saved at suspend time */
474 	struct hlist_head saved_cap_space;
475 	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
476 	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
477 
478 #ifdef CONFIG_HOTPLUG_PCI_PCIE
479 	unsigned int	broken_cmd_compl:1;	/* No compl for some cmds */
480 #endif
481 #ifdef CONFIG_PCIE_PTM
482 	u16		ptm_cap;		/* PTM Capability */
483 	unsigned int	ptm_root:1;
484 	unsigned int	ptm_enabled:1;
485 	u8		ptm_granularity;
486 #endif
487 #ifdef CONFIG_PCI_MSI
488 	void __iomem	*msix_base;
489 	raw_spinlock_t	msi_lock;
490 #endif
491 	struct pci_vpd	vpd;
492 #ifdef CONFIG_PCIE_DPC
493 	u16		dpc_cap;
494 	unsigned int	dpc_rp_extensions:1;
495 	u8		dpc_rp_log_size;
496 #endif
497 #ifdef CONFIG_PCI_ATS
498 	union {
499 		struct pci_sriov	*sriov;		/* PF: SR-IOV info */
500 		struct pci_dev		*physfn;	/* VF: related PF */
501 	};
502 	u16		ats_cap;	/* ATS Capability offset */
503 	u8		ats_stu;	/* ATS Smallest Translation Unit */
504 #endif
505 #ifdef CONFIG_PCI_PRI
506 	u16		pri_cap;	/* PRI Capability offset */
507 	u32		pri_reqs_alloc; /* Number of PRI requests allocated */
508 	unsigned int	pasid_required:1; /* PRG Response PASID Required */
509 #endif
510 #ifdef CONFIG_PCI_PASID
511 	u16		pasid_cap;	/* PASID Capability offset */
512 	u16		pasid_features;
513 #endif
514 #ifdef CONFIG_PCI_P2PDMA
515 	struct pci_p2pdma __rcu *p2pdma;
516 #endif
517 #ifdef CONFIG_PCI_DOE
518 	struct xarray	doe_mbs;	/* Data Object Exchange mailboxes */
519 #endif
520 	u16		acs_cap;	/* ACS Capability offset */
521 	phys_addr_t	rom;		/* Physical address if not from BAR */
522 	size_t		romlen;		/* Length if not from BAR */
523 	/*
524 	 * Driver name to force a match.  Do not set directly, because core
525 	 * frees it.  Use driver_set_override() to set or clear it.
526 	 */
527 	const char	*driver_override;
528 
529 	unsigned long	priv_flags;	/* Private flags for the PCI driver */
530 
531 	/* These methods index pci_reset_fn_methods[] */
532 	u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
533 
534 	ANDROID_KABI_RESERVE(1);
535 	ANDROID_KABI_RESERVE(2);
536 	ANDROID_KABI_RESERVE(3);
537 	ANDROID_KABI_RESERVE(4);
538 };
539 
540 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
541 {
542 #ifdef CONFIG_PCI_IOV
543 	if (dev->is_virtfn)
544 		dev = dev->physfn;
545 #endif
546 	return dev;
547 }
548 
549 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
550 
551 #define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
552 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
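
/*
 * Example (editorial illustration): for_each_pci_dev() walks every known PCI
 * device; pci_get_device() handles the reference counting, releasing the
 * previous device and holding the one it returns:
 *
 *	struct pci_dev *pdev = NULL;
 *
 *	for_each_pci_dev(pdev) {
 *		if (pdev->vendor == PCI_VENDOR_ID_INTEL)
 *			pci_info(pdev, "Intel device %04x\n", pdev->device);
 *	}
 */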
553 
554 static inline int pci_channel_offline(struct pci_dev *pdev)
555 {
556 	return (pdev->error_state != pci_channel_io_normal);
557 }
558 
559 /*
560  * Currently in ACPI spec, for each PCI host bridge, PCI Segment
561  * Group number is limited to a 16-bit value, therefore (int)-1 is
562  * not a valid PCI domain number, and can be used as a sentinel
563  * value indicating ->domain_nr is not set by the driver (and
564  * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
565  * pci_bus_find_domain_nr()).
566  */
567 #define PCI_DOMAIN_NR_NOT_SET (-1)
568 
569 struct pci_host_bridge {
570 	struct device	dev;
571 	struct pci_bus	*bus;		/* Root bus */
572 	struct pci_ops	*ops;
573 	struct pci_ops	*child_ops;
574 	void		*sysdata;
575 	int		busnr;
576 	int		domain_nr;
577 	struct list_head windows;	/* resource_entry */
578 	struct list_head dma_ranges;	/* dma ranges resource list */
579 	u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
580 	int (*map_irq)(const struct pci_dev *, u8, u8);
581 	void (*release_fn)(struct pci_host_bridge *);
582 	void		*release_data;
583 	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
584 	unsigned int	no_ext_tags:1;		/* No Extended Tags */
585 	unsigned int	no_inc_mrrs:1;		/* No Increase MRRS */
586 	unsigned int	native_aer:1;		/* OS may use PCIe AER */
587 	unsigned int	native_pcie_hotplug:1;	/* OS may use PCIe hotplug */
588 	unsigned int	native_shpc_hotplug:1;	/* OS may use SHPC hotplug */
589 	unsigned int	native_pme:1;		/* OS may use PCIe PME */
590 	unsigned int	native_ltr:1;		/* OS may use PCIe LTR */
591 	unsigned int	native_dpc:1;		/* OS may use PCIe DPC */
592 	unsigned int	native_cxl_error:1;	/* OS may use CXL RAS/Events */
593 	unsigned int	preserve_config:1;	/* Preserve FW resource setup */
594 	unsigned int	size_windows:1;		/* Enable root bus sizing */
595 	unsigned int	msi_domain:1;		/* Bridge wants MSI domain */
596 
597 	/* Resource alignment requirements */
598 	resource_size_t (*align_resource)(struct pci_dev *dev,
599 			const struct resource *res,
600 			resource_size_t start,
601 			resource_size_t size,
602 			resource_size_t align);
603 
604 	ANDROID_KABI_RESERVE(1);
605 	ANDROID_KABI_RESERVE(2);
606 
607 	unsigned long	private[] ____cacheline_aligned;
608 };
609 
610 #define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
611 
612 static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
613 {
614 	return (void *)bridge->private;
615 }
616 
617 static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
618 {
619 	return container_of(priv, struct pci_host_bridge, private);
620 }
621 
622 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
623 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
624 						   size_t priv);
625 void pci_free_host_bridge(struct pci_host_bridge *bridge);
626 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
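
/*
 * Example (editorial illustration, hypothetical "foo" host controller driver):
 * private state is usually embedded in the bridge allocation and fetched back
 * with pci_host_bridge_priv():
 *
 *	struct foo_pcie *foo;
 *	struct pci_host_bridge *bridge;
 *
 *	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*foo));
 *	if (!bridge)
 *		return -ENOMEM;
 *	foo = pci_host_bridge_priv(bridge);
 *	bridge->sysdata = foo;
 *	bridge->ops = &foo_pci_ops;		(hypothetical struct pci_ops)
 *	return pci_host_probe(bridge);
 */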
627 
628 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
629 				 void (*release_fn)(struct pci_host_bridge *),
630 				 void *release_data);
631 
632 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
633 
634 /*
635  * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
636  * to P2P or CardBus bridge windows) go in a table.  Additional ones (for
637  * buses below host bridges or subtractive decode bridges) go in the list.
638  * Use pci_bus_for_each_resource() to iterate through all the resources.
639  */
640 
641 /*
642  * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
643  * and there's no way to program the bridge with the details of the window.
644  * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
645  * decode bit set, because they are explicit and can be programmed with _SRS.
646  */
647 #define PCI_SUBTRACTIVE_DECODE	0x1
648 
649 struct pci_bus_resource {
650 	struct list_head	list;
651 	struct resource		*res;
652 	unsigned int		flags;
653 };
654 
655 #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
656 
657 struct pci_bus {
658 	struct list_head node;		/* Node in list of buses */
659 	struct pci_bus	*parent;	/* Parent bus this bridge is on */
660 	struct list_head children;	/* List of child buses */
661 	struct list_head devices;	/* List of devices on this bus */
662 	struct pci_dev	*self;		/* Bridge device as seen by parent */
663 	struct list_head slots;		/* List of slots on this bus;
664 					   protected by pci_slot_mutex */
665 	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
666 	struct list_head resources;	/* Address space routed to this bus */
667 	struct resource busn_res;	/* Bus numbers routed to this bus */
668 
669 	struct pci_ops	*ops;		/* Configuration access functions */
670 	void		*sysdata;	/* Hook for sys-specific extension */
671 	struct proc_dir_entry *procdir;	/* Directory entry in /proc/bus/pci */
672 
673 	unsigned char	number;		/* Bus number */
674 	unsigned char	primary;	/* Number of primary bridge */
675 	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
676 	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
677 #ifdef CONFIG_PCI_DOMAINS_GENERIC
678 	int		domain_nr;
679 #endif
680 
681 	char		name[48];
682 
683 	unsigned short	bridge_ctl;	/* Manage NO_ISA/FBB/et al behaviors */
684 	pci_bus_flags_t bus_flags;	/* Inherited by child buses */
685 	struct device		*bridge;
686 	struct device		dev;
687 	struct bin_attribute	*legacy_io;	/* Legacy I/O for this bus */
688 	struct bin_attribute	*legacy_mem;	/* Legacy mem */
689 	unsigned int		is_added:1;
690 	unsigned int		unsafe_warn:1;	/* warned about RW1C config write */
691 
692 	ANDROID_KABI_RESERVE(1);
693 	ANDROID_KABI_RESERVE(2);
694 	ANDROID_KABI_RESERVE(3);
695 	ANDROID_KABI_RESERVE(4);
696 };
697 
698 #define to_pci_bus(n)	container_of(n, struct pci_bus, dev)
699 
700 static inline u16 pci_dev_id(struct pci_dev *dev)
701 {
702 	return PCI_DEVID(dev->bus->number, dev->devfn);
703 }
704 
705 /*
706  * Returns true if the PCI bus is root (behind host-PCI bridge),
707  * false otherwise
708  *
709  * Some code assumes that "bus->self == NULL" means that bus is a root bus.
710  * This is incorrect because "virtual" buses added for SR-IOV (via
711  * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
712  */
713 static inline bool pci_is_root_bus(struct pci_bus *pbus)
714 {
715 	return !(pbus->parent);
716 }
717 
718 /**
719  * pci_is_bridge - check if the PCI device is a bridge
720  * @dev: PCI device
721  *
722  * Return true if the PCI device is a bridge, whether or not it has a
723  * subordinate bus.
724  */
725 static inline bool pci_is_bridge(struct pci_dev *dev)
726 {
727 	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
728 		dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
729 }
730 
731 #define for_each_pci_bridge(dev, bus)				\
732 	list_for_each_entry(dev, &bus->devices, bus_list)	\
733 		if (!pci_is_bridge(dev)) {} else
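
/*
 * Example (editorial illustration): walking only the bridges on a bus, e.g.
 * from code that needs to visit each downstream port:
 *
 *	struct pci_dev *dev;
 *
 *	for_each_pci_bridge(dev, bus)
 *		pci_info(dev, "bridge to bus %02x\n",
 *			 dev->subordinate ? dev->subordinate->number : 0);
 */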
734 
735 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
736 {
737 	dev = pci_physfn(dev);
738 	if (pci_is_root_bus(dev->bus))
739 		return NULL;
740 
741 	return dev->bus->self;
742 }
743 
744 #ifdef CONFIG_PCI_MSI
745 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
746 {
747 	return pci_dev->msi_enabled || pci_dev->msix_enabled;
748 }
749 #else
750 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
751 #endif
752 
753 /* Error values that may be returned by PCI functions */
754 #define PCIBIOS_SUCCESSFUL		0x00
755 #define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
756 #define PCIBIOS_BAD_VENDOR_ID		0x83
757 #define PCIBIOS_DEVICE_NOT_FOUND	0x86
758 #define PCIBIOS_BAD_REGISTER_NUMBER	0x87
759 #define PCIBIOS_SET_FAILED		0x88
760 #define PCIBIOS_BUFFER_TOO_SMALL	0x89
761 
762 /* Translate above to generic errno for passing back through non-PCI code */
763 static inline int pcibios_err_to_errno(int err)
764 {
765 	if (err <= PCIBIOS_SUCCESSFUL)
766 		return err; /* Assume already errno */
767 
768 	switch (err) {
769 	case PCIBIOS_FUNC_NOT_SUPPORTED:
770 		return -ENOENT;
771 	case PCIBIOS_BAD_VENDOR_ID:
772 		return -ENOTTY;
773 	case PCIBIOS_DEVICE_NOT_FOUND:
774 		return -ENODEV;
775 	case PCIBIOS_BAD_REGISTER_NUMBER:
776 		return -EFAULT;
777 	case PCIBIOS_SET_FAILED:
778 		return -EIO;
779 	case PCIBIOS_BUFFER_TOO_SMALL:
780 		return -ENOSPC;
781 	}
782 
783 	return -ERANGE;
784 }
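
/*
 * Example (editorial illustration): config accessors return the PCIBIOS_*
 * codes above, so callers that must report an errno convert the result:
 *
 *	u16 cmd;
 *	int ret;
 *
 *	ret = pci_read_config_word(pdev, PCI_COMMAND, &cmd);
 *	if (ret)
 *		return pcibios_err_to_errno(ret);
 */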
785 
786 /* Low-level architecture-dependent routines */
787 
788 struct pci_ops {
789 	int (*add_bus)(struct pci_bus *bus);
790 	void (*remove_bus)(struct pci_bus *bus);
791 	void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
792 	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
793 	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
794 
795 	ANDROID_KABI_RESERVE(1);
796 };
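
/*
 * Example (editorial illustration, hypothetical ECAM-style controller): a
 * driver whose config space is one linear mapping only needs map_bus() and
 * can reuse the generic accessors declared later in this header:
 *
 *	static void __iomem *foo_map_bus(struct pci_bus *bus,
 *					 unsigned int devfn, int where)
 *	{
 *		struct foo_pcie *foo = bus->sysdata;
 *
 *		return foo->cfg_base + (bus->number << 20) + (devfn << 12) + where;
 *	}
 *
 *	static struct pci_ops foo_pci_ops = {
 *		.map_bus = foo_map_bus,
 *		.read	 = pci_generic_config_read,
 *		.write	 = pci_generic_config_write,
 *	};
 */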
797 
798 /*
799  * ACPI needs to be able to access PCI config space before we've done a
800  * PCI bus scan and created pci_bus structures.
801  */
802 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
803 		 int reg, int len, u32 *val);
804 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
805 		  int reg, int len, u32 val);
806 
807 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
808 typedef u64 pci_bus_addr_t;
809 #else
810 typedef u32 pci_bus_addr_t;
811 #endif
812 
813 struct pci_bus_region {
814 	pci_bus_addr_t	start;
815 	pci_bus_addr_t	end;
816 };
817 
818 struct pci_dynids {
819 	spinlock_t		lock;	/* Protects list, index */
820 	struct list_head	list;	/* For IDs added at runtime */
821 };
822 
823 
824 /*
825  * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
826  * a set of callbacks in struct pci_error_handlers, that device driver
827  * will be notified of PCI bus errors, and will be driven to recovery
828  * when an error occurs.
829  */
830 
831 typedef unsigned int __bitwise pci_ers_result_t;
832 
833 enum pci_ers_result {
834 	/* No result/none/not supported in device driver */
835 	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
836 
837 	/* Device driver can recover without slot reset */
838 	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
839 
840 	/* Device driver wants slot to be reset */
841 	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
842 
843 	/* Device has completely failed, is unrecoverable */
844 	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
845 
846 	/* Device driver is fully recovered and operational */
847 	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
848 
849 	/* No AER capabilities registered for the driver */
850 	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
851 };
852 
853 /* PCI bus error event callbacks */
854 struct pci_error_handlers {
855 	/* PCI bus error detected on this device */
856 	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
857 					   pci_channel_state_t error);
858 
859 	/* MMIO has been re-enabled, but not DMA */
860 	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
861 
862 	/* PCI slot has been reset */
863 	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
864 
865 	/* PCI function reset prepare or completed */
866 	void (*reset_prepare)(struct pci_dev *dev);
867 	void (*reset_done)(struct pci_dev *dev);
868 
869 	/* Device driver may resume normal operations */
870 	void (*resume)(struct pci_dev *dev);
871 
872 	/* Allow device driver to record more details of a correctable error */
873 	void (*cor_error_detected)(struct pci_dev *dev);
874 
875 	ANDROID_KABI_RESERVE(1);
876 };
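
/*
 * Example (editorial illustration, hypothetical "foo" driver): a minimal set
 * of handlers.  Returning PCI_ERS_RESULT_NEED_RESET from error_detected()
 * asks the core for a slot reset, after which slot_reset() and then resume()
 * are called:
 *
 *	static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
 *						   pci_channel_state_t state)
 *	{
 *		if (state == pci_channel_io_perm_failure)
 *			return PCI_ERS_RESULT_DISCONNECT;
 *		return PCI_ERS_RESULT_NEED_RESET;
 *	}
 *
 *	static const struct pci_error_handlers foo_err_handler = {
 *		.error_detected	= foo_error_detected,
 *		.slot_reset	= foo_slot_reset,
 *		.resume		= foo_resume,
 *	};
 */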
877 
878 
879 struct module;
880 
881 /**
882  * struct pci_driver - PCI driver structure
883  * @node:	List of driver structures.
884  * @name:	Driver name.
885  * @id_table:	Pointer to table of device IDs the driver is
886  *		interested in.  Most drivers should export this
887  *		table using MODULE_DEVICE_TABLE(pci,...).
888  * @probe:	This probing function gets called (during execution
889  *		of pci_register_driver() for already existing
890  *		devices or later if a new device gets inserted) for
891  *		all PCI devices which match the ID table and are not
892  *		"owned" by the other drivers yet. This function gets
893  *		passed a "struct pci_dev \*" for each device whose
894  *		entry in the ID table matches the device. The probe
895  *		function returns zero when the driver chooses to
896  *		take "ownership" of the device or an error code
897  *		(negative number) otherwise.
898  *		The probe function always gets called from process
899  *		context, so it can sleep.
900  * @remove:	The remove() function gets called whenever a device
901  *		being handled by this driver is removed (either during
902  *		deregistration of the driver or when it's manually
903  *		pulled out of a hot-pluggable slot).
904  *		The remove function always gets called from process
905  *		context, so it can sleep.
906  * @suspend:	Put device into low power state.
907  * @resume:	Wake device from low power state.
908  *		(Please see Documentation/power/pci.rst for descriptions
909  *		of PCI Power Management and the related functions.)
910  * @shutdown:	Hook into reboot_notifier_list (kernel/sys.c).
911  *		Intended to stop any idling DMA operations.
912  *		Useful for enabling wake-on-lan (NIC) or changing
913  *		the power state of a device before reboot.
914  *		e.g. drivers/net/e100.c.
915  * @sriov_configure: Optional driver callback to allow configuration of
916  *		number of VFs to enable via sysfs "sriov_numvfs" file.
917  * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
918  *              vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
919  *              This will change MSI-X Table Size in the VF Message Control
920  *              registers.
921  * @sriov_get_vf_total_msix: PF driver callback to get the total number of
922  *              MSI-X vectors available for distribution to the VFs.
923  * @err_handler: See Documentation/PCI/pci-error-recovery.rst
924  * @groups:	Sysfs attribute groups.
925  * @dev_groups: Attributes attached to the device that will be
926  *              created once it is bound to the driver.
927  * @driver:	Driver model structure.
928  * @dynids:	List of dynamically added device IDs.
929  * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
930  *		For most device drivers, no need to care about this flag
931  *		as long as all DMAs are handled through the kernel DMA API.
932  *		For some special ones, for example VFIO drivers, they know
933  *		how to manage the DMA themselves and set this flag so that
934  *		the IOMMU layer will allow them to setup and manage their
935  *		own I/O address space.
936  */
937 struct pci_driver {
938 	struct list_head	node;
939 	const char		*name;
940 	const struct pci_device_id *id_table;	/* Must be non-NULL for probe to be called */
941 	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
942 	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
943 	int  (*suspend)(struct pci_dev *dev, pm_message_t state);	/* Device suspended */
944 	int  (*resume)(struct pci_dev *dev);	/* Device woken up */
945 	void (*shutdown)(struct pci_dev *dev);
946 	int  (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
947 	int  (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
948 	u32  (*sriov_get_vf_total_msix)(struct pci_dev *pf);
949 	const struct pci_error_handlers *err_handler;
950 	const struct attribute_group **groups;
951 	const struct attribute_group **dev_groups;
952 	struct device_driver	driver;
953 	struct pci_dynids	dynids;
954 	bool driver_managed_dma;
955 
956 	ANDROID_KABI_RESERVE(1);
957 	ANDROID_KABI_RESERVE(2);
958 	ANDROID_KABI_RESERVE(3);
959 	ANDROID_KABI_RESERVE(4);
960 };
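
/*
 * Example (editorial illustration, hypothetical "foo" driver): the usual
 * skeleton tying the fields above together; module_pci_driver(), defined
 * later in this header, generates the module init/exit boilerplate:
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },	(hypothetical ID)
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, foo_ids);
 *
 *	static struct pci_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_pci_driver(foo_driver);
 */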
961 
962 static inline struct pci_driver *to_pci_driver(struct device_driver *drv)
963 {
964     return drv ? container_of(drv, struct pci_driver, driver) : NULL;
965 }
966 
967 /**
968  * PCI_DEVICE - macro used to describe a specific PCI device
969  * @vend: the 16 bit PCI Vendor ID
970  * @dev: the 16 bit PCI Device ID
971  *
972  * This macro is used to create a struct pci_device_id that matches a
973  * specific device.  The subvendor and subdevice fields will be set to
974  * PCI_ANY_ID.
975  */
976 #define PCI_DEVICE(vend,dev) \
977 	.vendor = (vend), .device = (dev), \
978 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
979 
980 /**
981  * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
982  *                              override_only flags.
983  * @vend: the 16 bit PCI Vendor ID
984  * @dev: the 16 bit PCI Device ID
985  * @driver_override: the 32 bit PCI Device override_only
986  *
987  * This macro is used to create a struct pci_device_id that matches only a
988  * driver_override device. The subvendor and subdevice fields will be set to
989  * PCI_ANY_ID.
990  */
991 #define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
992 	.vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
993 	.subdevice = PCI_ANY_ID, .override_only = (driver_override)
994 
995 /**
996  * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
997  *                                   "driver_override" PCI device.
998  * @vend: the 16 bit PCI Vendor ID
999  * @dev: the 16 bit PCI Device ID
1000  *
1001  * This macro is used to create a struct pci_device_id that matches a
1002  * specific device. The subvendor and subdevice fields will be set to
1003  * PCI_ANY_ID and the driver_override will be set to
1004  * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
1005  */
1006 #define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
1007 	PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
1008 
1009 /**
1010  * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
1011  * @vend: the 16 bit PCI Vendor ID
1012  * @dev: the 16 bit PCI Device ID
1013  * @subvend: the 16 bit PCI Subvendor ID
1014  * @subdev: the 16 bit PCI Subdevice ID
1015  *
1016  * This macro is used to create a struct pci_device_id that matches a
1017  * specific device with subsystem information.
1018  */
1019 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
1020 	.vendor = (vend), .device = (dev), \
1021 	.subvendor = (subvend), .subdevice = (subdev)
1022 
1023 /**
1024  * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
1025  * @dev_class: the class, subclass, prog-if triple for this device
1026  * @dev_class_mask: the class mask for this device
1027  *
1028  * This macro is used to create a struct pci_device_id that matches a
1029  * specific PCI class.  The vendor, device, subvendor, and subdevice
1030  * fields will be set to PCI_ANY_ID.
1031  */
1032 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
1033 	.class = (dev_class), .class_mask = (dev_class_mask), \
1034 	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
1035 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1036 
1037 /**
1038  * PCI_VDEVICE - macro used to describe a specific PCI device in short form
1039  * @vend: the vendor name
1040  * @dev: the 16 bit PCI Device ID
1041  *
1042  * This macro is used to create a struct pci_device_id that matches a
1043  * specific PCI device.  The subvendor and subdevice fields will be set
1044  * to PCI_ANY_ID. The macro allows the next field to follow as the device
1045  * private data.
1046  */
1047 #define PCI_VDEVICE(vend, dev) \
1048 	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1049 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
1050 
1051 /**
1052  * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
1053  * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
1054  * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
1055  * @data: the driver data to be filled
1056  *
1057  * This macro is used to create a struct pci_device_id that matches a
1058  * specific PCI device.  The subvendor and subdevice fields will be set
1059  * to PCI_ANY_ID.
1060  */
1061 #define PCI_DEVICE_DATA(vend, dev, data) \
1062 	.vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
1063 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
1064 	.driver_data = (kernel_ulong_t)(data)
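
/*
 * Example (editorial illustration, hypothetical names): PCI_DEVICE_DATA()
 * expands the vendor/device names and stores @data in driver_data, which
 * probe() reads back from the matched entry:
 *
 *	{ PCI_DEVICE_DATA(INTEL, FOO, &foo_chip_info) },
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		const struct foo_chip_info *info = (void *)id->driver_data;
 *		...
 *	}
 */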
1065 
1066 enum {
1067 	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* Ignore firmware setup */
1068 	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* Reassign all bus numbers */
1069 	PCI_PROBE_ONLY		= 0x00000004,	/* Use existing setup */
1070 	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* Don't do ISA alignment */
1071 	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* Enable domains in /proc */
1072 	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
1073 	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* Scan all, not just dev 0 */
1074 };
1075 
1076 #define PCI_IRQ_LEGACY		(1 << 0) /* Allow legacy interrupts */
1077 #define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
1078 #define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
1079 #define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */
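
/*
 * Example (editorial illustration): these flags are passed to
 * pci_alloc_irq_vectors(), declared further down in this header, to state
 * which interrupt types the driver can handle:
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
 *				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
 *	if (nvec < 0)
 *		return nvec;
 */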
1080 
1081 /* These external functions are only available when PCI support is enabled */
1082 #ifdef CONFIG_PCI
1083 
1084 extern unsigned int pci_flags;
1085 
1086 static inline void pci_set_flags(int flags) { pci_flags = flags; }
1087 static inline void pci_add_flags(int flags) { pci_flags |= flags; }
1088 static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
1089 static inline int pci_has_flag(int flag) { return pci_flags & flag; }
1090 
1091 void pcie_bus_configure_settings(struct pci_bus *bus);
1092 
1093 enum pcie_bus_config_types {
1094 	PCIE_BUS_TUNE_OFF,	/* Don't touch MPS at all */
1095 	PCIE_BUS_DEFAULT,	/* Ensure MPS matches upstream bridge */
1096 	PCIE_BUS_SAFE,		/* Use largest MPS boot-time devices support */
1097 	PCIE_BUS_PERFORMANCE,	/* Use MPS and MRRS for best performance */
1098 	PCIE_BUS_PEER2PEER,	/* Set MPS = 128 for all devices */
1099 };
1100 
1101 extern enum pcie_bus_config_types pcie_bus_config;
1102 
1103 extern struct bus_type pci_bus_type;
1104 
1105 /* Do NOT directly access these two variables, unless you are arch-specific PCI
1106  * code, or PCI core code. */
1107 extern struct list_head pci_root_buses;	/* List of all known PCI buses */
1108 /* Some device drivers need to know if PCI has been initialized */
1109 int no_pci_devices(void);
1110 
1111 void pcibios_resource_survey_bus(struct pci_bus *bus);
1112 void pcibios_bus_add_device(struct pci_dev *pdev);
1113 void pcibios_add_bus(struct pci_bus *bus);
1114 void pcibios_remove_bus(struct pci_bus *bus);
1115 void pcibios_fixup_bus(struct pci_bus *);
1116 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
1117 /* Architecture-specific versions may override this (weak) */
1118 char *pcibios_setup(char *str);
1119 
1120 /* Used only when drivers/pci/setup.c is used */
1121 resource_size_t pcibios_align_resource(void *, const struct resource *,
1122 				resource_size_t,
1123 				resource_size_t);
1124 
1125 /* Weak but can be overridden by arch */
1126 void pci_fixup_cardbus(struct pci_bus *);
1127 
1128 /* Generic PCI functions used internally */
1129 
1130 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
1131 			     struct resource *res);
1132 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
1133 			     struct pci_bus_region *region);
1134 void pcibios_scan_specific_bus(int busn);
1135 struct pci_bus *pci_find_bus(int domain, int busnr);
1136 void pci_bus_add_devices(const struct pci_bus *bus);
1137 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
1138 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1139 				    struct pci_ops *ops, void *sysdata,
1140 				    struct list_head *resources);
1141 int pci_host_probe(struct pci_host_bridge *bridge);
1142 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
1143 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
1144 void pci_bus_release_busn_res(struct pci_bus *b);
1145 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1146 				  struct pci_ops *ops, void *sysdata,
1147 				  struct list_head *resources);
1148 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
1149 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
1150 				int busnr);
1151 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1152 				 const char *name,
1153 				 struct hotplug_slot *hotplug);
1154 void pci_destroy_slot(struct pci_slot *slot);
1155 #ifdef CONFIG_SYSFS
1156 void pci_dev_assign_slot(struct pci_dev *dev);
1157 #else
1158 static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
1159 #endif
1160 int pci_scan_slot(struct pci_bus *bus, int devfn);
1161 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
1162 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
1163 unsigned int pci_scan_child_bus(struct pci_bus *bus);
1164 void pci_bus_add_device(struct pci_dev *dev);
1165 void pci_read_bridge_bases(struct pci_bus *child);
1166 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
1167 					  struct resource *res);
1168 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
1169 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
1170 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
1171 struct pci_dev *pci_dev_get(struct pci_dev *dev);
1172 void pci_dev_put(struct pci_dev *dev);
1173 DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
1174 void pci_remove_bus(struct pci_bus *b);
1175 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
1176 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
1177 void pci_stop_root_bus(struct pci_bus *bus);
1178 void pci_remove_root_bus(struct pci_bus *bus);
1179 void pci_setup_cardbus(struct pci_bus *bus);
1180 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
1181 void pci_sort_breadthfirst(void);
1182 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
1183 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
1184 
1185 /* Generic PCI functions exported to card drivers */
1186 
1187 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1188 u8 pci_find_capability(struct pci_dev *dev, int cap);
1189 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
1190 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
1191 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
1192 u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
1193 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
1194 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
1195 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
1196 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
1197 
1198 u64 pci_get_dsn(struct pci_dev *dev);
1199 
1200 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
1201 			       struct pci_dev *from);
1202 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
1203 			       unsigned int ss_vendor, unsigned int ss_device,
1204 			       struct pci_dev *from);
1205 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
1206 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
1207 					    unsigned int devfn);
1208 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
1209 struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
1210 
1211 int pci_dev_present(const struct pci_device_id *ids);
1212 
1213 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
1214 			     int where, u8 *val);
1215 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
1216 			     int where, u16 *val);
1217 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
1218 			      int where, u32 *val);
1219 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
1220 			      int where, u8 val);
1221 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
1222 			      int where, u16 val);
1223 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
1224 			       int where, u32 val);
1225 
1226 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
1227 			    int where, int size, u32 *val);
1228 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
1229 			    int where, int size, u32 val);
1230 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
1231 			      int where, int size, u32 *val);
1232 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
1233 			       int where, int size, u32 val);
1234 
1235 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
1236 
1237 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
1238 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
1239 int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
1240 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1241 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
1242 int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
1243 
1244 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
1245 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
1246 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1247 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
1248 int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
1249 						u16 clear, u16 set);
1250 int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
1251 					      u16 clear, u16 set);
1252 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
1253 					u32 clear, u32 set);
1254 
1255 /**
1256  * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
1257  * @dev:	PCI device structure of the PCI Express device
1258  * @pos:	PCI Express Capability Register
1259  * @clear:	Clear bitmask
1260  * @set:	Set bitmask
1261  *
1262  * Perform a Read-Modify-Write (RMW) operation using @clear and @set
1263  * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
1264  * Capability Registers are accessed concurrently in RMW fashion, hence
1265  * require locking which is handled transparently to the caller.
1266  */
1267 static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
1268 						     int pos,
1269 						     u16 clear, u16 set)
1270 {
1271 	switch (pos) {
1272 	case PCI_EXP_LNKCTL:
1273 	case PCI_EXP_RTCTL:
1274 		return pcie_capability_clear_and_set_word_locked(dev, pos,
1275 								 clear, set);
1276 	default:
1277 		return pcie_capability_clear_and_set_word_unlocked(dev, pos,
1278 								   clear, set);
1279 	}
1280 }
1281 
1282 static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
1283 					   u16 set)
1284 {
1285 	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
1286 }
1287 
1288 static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
1289 					    u32 set)
1290 {
1291 	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
1292 }
1293 
1294 static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
1295 					     u16 clear)
1296 {
1297 	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
1298 }
1299 
1300 static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
1301 					      u32 clear)
1302 {
1303 	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
1304 }
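
/*
 * Example (editorial illustration): a typical RMW on the Link Control
 * register, here clearing the ASPM Control field; the PCI_EXP_LNKCTL case
 * above routes this through the locked variant:
 *
 *	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
 *					   PCI_EXP_LNKCTL_ASPMC, 0);
 */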
1305 
1306 /* User-space driven config access */
1307 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1308 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1309 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1310 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1311 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1312 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1313 
1314 int __must_check pci_enable_device(struct pci_dev *dev);
1315 int __must_check pci_enable_device_io(struct pci_dev *dev);
1316 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1317 int __must_check pci_reenable_device(struct pci_dev *);
1318 int __must_check pcim_enable_device(struct pci_dev *pdev);
1319 void pcim_pin_device(struct pci_dev *pdev);
1320 
1321 static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
1322 {
1323 	/*
1324 	 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
1325 	 * writable and no quirk has marked the feature broken.
1326 	 */
1327 	return !pdev->broken_intx_masking;
1328 }
1329 
1330 static inline int pci_is_enabled(struct pci_dev *pdev)
1331 {
1332 	return (atomic_read(&pdev->enable_cnt) > 0);
1333 }
1334 
1335 static inline int pci_is_managed(struct pci_dev *pdev)
1336 {
1337 	return pdev->is_managed;
1338 }
1339 
1340 void pci_disable_device(struct pci_dev *dev);
1341 
1342 extern unsigned int pcibios_max_latency;
1343 void pci_set_master(struct pci_dev *dev);
1344 void pci_clear_master(struct pci_dev *dev);
1345 
1346 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1347 int pci_set_cacheline_size(struct pci_dev *dev);
1348 int __must_check pci_set_mwi(struct pci_dev *dev);
1349 int __must_check pcim_set_mwi(struct pci_dev *dev);
1350 int pci_try_set_mwi(struct pci_dev *dev);
1351 void pci_clear_mwi(struct pci_dev *dev);
1352 void pci_disable_parity(struct pci_dev *dev);
1353 void pci_intx(struct pci_dev *dev, int enable);
1354 bool pci_check_and_mask_intx(struct pci_dev *dev);
1355 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1356 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1357 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1358 int pcix_get_max_mmrbc(struct pci_dev *dev);
1359 int pcix_get_mmrbc(struct pci_dev *dev);
1360 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1361 int pcie_get_readrq(struct pci_dev *dev);
1362 int pcie_set_readrq(struct pci_dev *dev, int rq);
1363 int pcie_get_mps(struct pci_dev *dev);
1364 int pcie_set_mps(struct pci_dev *dev, int mps);
1365 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1366 			     enum pci_bus_speed *speed,
1367 			     enum pcie_link_width *width);
1368 void pcie_print_link_status(struct pci_dev *dev);
1369 int pcie_reset_flr(struct pci_dev *dev, bool probe);
1370 int pcie_flr(struct pci_dev *dev);
1371 int __pci_reset_function_locked(struct pci_dev *dev);
1372 int pci_reset_function(struct pci_dev *dev);
1373 int pci_reset_function_locked(struct pci_dev *dev);
1374 int pci_try_reset_function(struct pci_dev *dev);
1375 int pci_probe_reset_slot(struct pci_slot *slot);
1376 int pci_probe_reset_bus(struct pci_bus *bus);
1377 int pci_reset_bus(struct pci_dev *dev);
1378 void pci_reset_secondary_bus(struct pci_dev *dev);
1379 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1380 void pci_update_resource(struct pci_dev *dev, int resno);
1381 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1382 int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
1383 void pci_release_resource(struct pci_dev *dev, int resno);
pci_rebar_bytes_to_size(u64 bytes)1384 static inline int pci_rebar_bytes_to_size(u64 bytes)
1385 {
1386 	bytes = roundup_pow_of_two(bytes);
1387 
1388 	/* Return BAR size as defined in the resizable BAR specification */
1389 	return max(ilog2(bytes), 20) - 20;
1390 }
1391 
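/*
 * Worked example for pci_rebar_bytes_to_size(): the Resizable BAR encoding
 * is log2(size) - 20 with a floor at 1 MB, so:
 *
 *	1 MB   (1 << 20)    ->  0
 *	256 MB (1 << 28)    ->  8
 *	8 GB   (1ULL << 33) -> 13
 *	requests below 1 MB round up and also yield 0
 */
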
1392 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
1393 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
1394 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1395 bool pci_device_is_present(struct pci_dev *pdev);
1396 void pci_ignore_hotplug(struct pci_dev *dev);
1397 struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
1398 int pci_status_get_and_clear_errors(struct pci_dev *pdev);
1399 
1400 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1401 		irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1402 		const char *fmt, ...);
1403 void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1404 
1405 /* ROM control related routines */
1406 int pci_enable_rom(struct pci_dev *pdev);
1407 void pci_disable_rom(struct pci_dev *pdev);
1408 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1409 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1410 
1411 /* Power management related routines */
1412 int pci_save_state(struct pci_dev *dev);
1413 void pci_restore_state(struct pci_dev *dev);
1414 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1415 int pci_load_saved_state(struct pci_dev *dev,
1416 			 struct pci_saved_state *state);
1417 int pci_load_and_free_saved_state(struct pci_dev *dev,
1418 				  struct pci_saved_state **state);
1419 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
1420 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1421 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
1422 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1423 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1424 void pci_pme_active(struct pci_dev *dev, bool enable);
1425 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1426 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1427 int pci_prepare_to_sleep(struct pci_dev *dev);
1428 int pci_back_from_sleep(struct pci_dev *dev);
1429 bool pci_dev_run_wake(struct pci_dev *dev);
1430 void pci_d3cold_enable(struct pci_dev *dev);
1431 void pci_d3cold_disable(struct pci_dev *dev);
1432 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1433 void pci_resume_bus(struct pci_bus *bus);
1434 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
1435 
1436 /* For use by arch with custom probe code */
1437 void set_pcie_port_type(struct pci_dev *pdev);
1438 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1439 
1440 /* Functions for PCI Hotplug drivers to use */
1441 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
1442 unsigned int pci_rescan_bus(struct pci_bus *bus);
1443 void pci_lock_rescan_remove(void);
1444 void pci_unlock_rescan_remove(void);
1445 
1446 /* Vital Product Data routines */
1447 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1448 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1449 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1450 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1451 
1452 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1453 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1454 void pci_bus_assign_resources(const struct pci_bus *bus);
1455 void pci_bus_claim_resources(struct pci_bus *bus);
1456 void pci_bus_size_bridges(struct pci_bus *bus);
1457 int pci_claim_resource(struct pci_dev *, int);
1458 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1459 void pci_assign_unassigned_resources(void);
1460 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1461 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1462 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1463 int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
1464 int pci_enable_resources(struct pci_dev *, int mask);
1465 void pci_assign_irq(struct pci_dev *dev);
1466 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1467 #define HAVE_PCI_REQ_REGIONS	2
1468 int __must_check pci_request_regions(struct pci_dev *, const char *);
1469 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1470 void pci_release_regions(struct pci_dev *);
1471 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1472 void pci_release_region(struct pci_dev *, int);
1473 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1474 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1475 void pci_release_selected_regions(struct pci_dev *, int);
1476 
1477 static inline __must_check struct resource *
pci_request_config_region_exclusive(struct pci_dev * pdev,unsigned int offset,unsigned int len,const char * name)1478 pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
1479 				    unsigned int len, const char *name)
1480 {
1481 	return __request_region(&pdev->driver_exclusive_resource, offset, len,
1482 				name, IORESOURCE_EXCLUSIVE);
1483 }
1484 
pci_release_config_region(struct pci_dev * pdev,unsigned int offset,unsigned int len)1485 static inline void pci_release_config_region(struct pci_dev *pdev,
1486 					     unsigned int offset,
1487 					     unsigned int len)
1488 {
1489 	__release_region(&pdev->driver_exclusive_resource, offset, len);
1490 }
1491 
1492 /* drivers/pci/bus.c */
1493 void pci_add_resource(struct list_head *resources, struct resource *res);
1494 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1495 			     resource_size_t offset);
1496 void pci_free_resource_list(struct list_head *resources);
1497 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
1498 			  unsigned int flags);
1499 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1500 void pci_bus_remove_resources(struct pci_bus *bus);
1501 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
1502 int devm_request_pci_bus_resources(struct device *dev,
1503 				   struct list_head *resources);
1504 
1505 /* Temporary until new and working PCI SBR API in place */
1506 int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1507 
1508 #define __pci_bus_for_each_res0(bus, res, ...)				\
1509 	for (unsigned int __b = 0;					\
1510 	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1511 	     __b++)
1512 
1513 #define __pci_bus_for_each_res1(bus, res, __b)				\
1514 	for (__b = 0;							\
1515 	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1516 	     __b++)
1517 
1518 /**
1519  * pci_bus_for_each_resource - iterate over PCI bus resources
1520  * @bus: the PCI bus
1521  * @res: pointer to the current resource
1522  * @...: optional index of the current resource
1523  *
1524  * Iterate over PCI bus resources. The iteration first walks the PCI bus
1525  * resource array, which holds at most %PCI_BRIDGE_RESOURCE_NUM entries,
1526  * and then continues with the separate list of additional resources, if
1527  * that list is not empty. That is why the logical OR is used above.
1528  *
1529  * Possible usage:
1530  *
1531  *	struct pci_bus *bus = ...;
1532  *	struct resource *res;
1533  *	unsigned int i;
1534  *
1535  * 	// With optional index
1536  * 	pci_bus_for_each_resource(bus, res, i)
1537  * 		pr_info("PCI bus resource[%u]: %pR\n", i, res);
1538  *
1539  * 	// Without index
1540  * 	pci_bus_for_each_resource(bus, res)
1541  * 		_do_something_(res);
1542  */
1543 #define pci_bus_for_each_resource(bus, res, ...)			\
1544 	CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__))	\
1545 		    (bus, res, __VA_ARGS__)
1546 
1547 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1548 			struct resource *res, resource_size_t size,
1549 			resource_size_t align, resource_size_t min,
1550 			unsigned long type_mask,
1551 			resource_size_t (*alignf)(void *,
1552 						  const struct resource *,
1553 						  resource_size_t,
1554 						  resource_size_t),
1555 			void *alignf_data);
1556 
1557 
1558 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
1559 			resource_size_t size);
1560 unsigned long pci_address_to_pio(phys_addr_t addr);
1561 phys_addr_t pci_pio_to_address(unsigned long pio);
1562 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1563 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1564 			   phys_addr_t phys_addr);
1565 void pci_unmap_iospace(struct resource *res);
1566 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1567 				      resource_size_t offset,
1568 				      resource_size_t size);
1569 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1570 					  struct resource *res);
1571 
pci_bus_address(struct pci_dev * pdev,int bar)1572 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1573 {
1574 	struct pci_bus_region region;
1575 
1576 	pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
1577 	return region.start;
1578 }
1579 
1580 /* Proper probing supporting hot-pluggable devices */
1581 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1582 				       const char *mod_name);
1583 
1584 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1585 #define pci_register_driver(driver)		\
1586 	__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1587 
1588 void pci_unregister_driver(struct pci_driver *dev);
1589 
1590 /**
1591  * module_pci_driver() - Helper macro for registering a PCI driver
1592  * @__pci_driver: pci_driver struct
1593  *
1594  * Helper macro for PCI drivers which do not do anything special in module
1595  * init/exit. This eliminates a lot of boilerplate. Each module may only
1596  * use this macro once, and calling it replaces module_init() and module_exit().
1597  */
1598 #define module_pci_driver(__pci_driver) \
1599 	module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
1600 
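/*
 * Minimal usage sketch for module_pci_driver(); the driver name, ID table
 * and callbacks below are hypothetical:
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_pci_driver(foo_pci_driver);
 */
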
1601 /**
1602  * builtin_pci_driver() - Helper macro for registering a PCI driver
1603  * @__pci_driver: pci_driver struct
1604  *
1605  * Helper macro for PCI drivers which do not do anything special in their
1606  * init code. This eliminates a lot of boilerplate. Each driver may only
1607  * use this macro once, and calling it replaces device_initcall(...).
1608  */
1609 #define builtin_pci_driver(__pci_driver) \
1610 	builtin_driver(__pci_driver, pci_register_driver)
1611 
1612 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1613 int pci_add_dynid(struct pci_driver *drv,
1614 		  unsigned int vendor, unsigned int device,
1615 		  unsigned int subvendor, unsigned int subdevice,
1616 		  unsigned int class, unsigned int class_mask,
1617 		  unsigned long driver_data);
1618 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1619 					 struct pci_dev *dev);
1620 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1621 		    int pass);
1622 
1623 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1624 		  void *userdata);
1625 void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1626 			 void *userdata);
1627 int pci_cfg_space_size(struct pci_dev *dev);
1628 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1629 void pci_setup_bridge(struct pci_bus *bus);
1630 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1631 					 unsigned long type);
1632 
1633 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1634 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1635 
1636 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1637 		      unsigned int command_bits, u32 flags);
1638 
1639 /*
1640  * Virtual interrupts allow more interrupts to be allocated than the
1641  * device has interrupts for. These are not programmed into the
1642  * device's MSI-X table and must be handled by the driver through
1643  * some other means.
1644  */
1645 #define PCI_IRQ_VIRTUAL		(1 << 4)
1646 
1647 #define PCI_IRQ_ALL_TYPES \
1648 	(PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1649 
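/*
 * Sketch of the usual vector allocation pattern (the bounds and names are
 * illustrative); PCI_IRQ_ALL_TYPES lets the core fall back from MSI-X to
 * MSI to legacy INTx as needed:
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
 *	if (nvec < 0)
 *		return nvec;
 *	irq = pci_irq_vector(pdev, 0);	// Linux IRQ number for vector 0
 */
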
1650 #include <linux/dmapool.h>
1651 
1652 struct msix_entry {
1653 	u32	vector;	/* Written by the kernel with the allocated IRQ vector */
1654 	u16	entry;	/* Filled in by the driver with the MSI-X table entry index */
1655 };
1656 
1657 struct msi_domain_template;
1658 
1659 #ifdef CONFIG_PCI_MSI
1660 int pci_msi_vec_count(struct pci_dev *dev);
1661 void pci_disable_msi(struct pci_dev *dev);
1662 int pci_msix_vec_count(struct pci_dev *dev);
1663 void pci_disable_msix(struct pci_dev *dev);
1664 void pci_restore_msi_state(struct pci_dev *dev);
1665 int pci_msi_enabled(void);
1666 int pci_enable_msi(struct pci_dev *dev);
1667 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1668 			  int minvec, int maxvec);
pci_enable_msix_exact(struct pci_dev * dev,struct msix_entry * entries,int nvec)1669 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1670 					struct msix_entry *entries, int nvec)
1671 {
1672 	int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1673 	if (rc < 0)
1674 		return rc;
1675 	return 0;
1676 }
1677 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1678 			  unsigned int max_vecs, unsigned int flags);
1679 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1680 				   unsigned int max_vecs, unsigned int flags,
1681 				   struct irq_affinity *affd);
1682 
1683 bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
1684 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1685 				     const struct irq_affinity_desc *affdesc);
1686 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
1687 
1688 void pci_free_irq_vectors(struct pci_dev *dev);
1689 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1690 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1691 bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
1692 			   unsigned int hwsize, void *data);
1693 struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
1694 				 const struct irq_affinity_desc *affdesc);
1695 void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
1696 
1697 #else
pci_msi_vec_count(struct pci_dev * dev)1698 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
pci_disable_msi(struct pci_dev * dev)1699 static inline void pci_disable_msi(struct pci_dev *dev) { }
pci_msix_vec_count(struct pci_dev * dev)1700 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
pci_disable_msix(struct pci_dev * dev)1701 static inline void pci_disable_msix(struct pci_dev *dev) { }
pci_restore_msi_state(struct pci_dev * dev)1702 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
pci_msi_enabled(void)1703 static inline int pci_msi_enabled(void) { return 0; }
pci_enable_msi(struct pci_dev * dev)1704 static inline int pci_enable_msi(struct pci_dev *dev)
1705 { return -ENOSYS; }
pci_enable_msix_range(struct pci_dev * dev,struct msix_entry * entries,int minvec,int maxvec)1706 static inline int pci_enable_msix_range(struct pci_dev *dev,
1707 			struct msix_entry *entries, int minvec, int maxvec)
1708 { return -ENOSYS; }
pci_enable_msix_exact(struct pci_dev * dev,struct msix_entry * entries,int nvec)1709 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1710 			struct msix_entry *entries, int nvec)
1711 { return -ENOSYS; }
1712 
1713 static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags,struct irq_affinity * aff_desc)1714 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1715 			       unsigned int max_vecs, unsigned int flags,
1716 			       struct irq_affinity *aff_desc)
1717 {
1718 	if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
1719 		return 1;
1720 	return -ENOSPC;
1721 }
1722 static inline int
pci_alloc_irq_vectors(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags)1723 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1724 		      unsigned int max_vecs, unsigned int flags)
1725 {
1726 	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
1727 					      flags, NULL);
1728 }
1729 
pci_msix_can_alloc_dyn(struct pci_dev * dev)1730 static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
1731 { return false; }
pci_msix_alloc_irq_at(struct pci_dev * dev,unsigned int index,const struct irq_affinity_desc * affdesc)1732 static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1733 						   const struct irq_affinity_desc *affdesc)
1734 {
1735 	struct msi_map map = { .index = -ENOSYS, };
1736 
1737 	return map;
1738 }
1739 
pci_msix_free_irq(struct pci_dev * pdev,struct msi_map map)1740 static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
1741 {
1742 }
1743 
pci_free_irq_vectors(struct pci_dev * dev)1744 static inline void pci_free_irq_vectors(struct pci_dev *dev)
1745 {
1746 }
1747 
pci_irq_vector(struct pci_dev * dev,unsigned int nr)1748 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1749 {
1750 	if (WARN_ON_ONCE(nr > 0))
1751 		return -EINVAL;
1752 	return dev->irq;
1753 }
pci_irq_get_affinity(struct pci_dev * pdev,int vec)1754 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
1755 		int vec)
1756 {
1757 	return cpu_possible_mask;
1758 }
1759 
pci_create_ims_domain(struct pci_dev * pdev,const struct msi_domain_template * template,unsigned int hwsize,void * data)1760 static inline bool pci_create_ims_domain(struct pci_dev *pdev,
1761 					 const struct msi_domain_template *template,
1762 					 unsigned int hwsize, void *data)
1763 { return false; }
1764 
pci_ims_alloc_irq(struct pci_dev * pdev,union msi_instance_cookie * icookie,const struct irq_affinity_desc * affdesc)1765 static inline struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev,
1766 					       union msi_instance_cookie *icookie,
1767 					       const struct irq_affinity_desc *affdesc)
1768 {
1769 	struct msi_map map = { .index = -ENOSYS, };
1770 
1771 	return map;
1772 }
1773 
pci_ims_free_irq(struct pci_dev * pdev,struct msi_map map)1774 static inline void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map)
1775 {
1776 }
1777 
1778 #endif
1779 
1780 /**
1781  * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1782  * @d: the INTx IRQ domain
1783  * @node: the DT node for the device whose interrupt we're translating
1784  * @intspec: the interrupt specifier data from the DT
1785  * @intsize: the number of entries in @intspec
1786  * @out_hwirq: pointer at which to write the hwirq number
1787  * @out_type: pointer at which to write the interrupt type
1788  *
1789  * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1790  * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1791  * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
1792  * INTx value to obtain the hwirq number.
1793  *
1794  * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1795  */
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)1796 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1797 				      struct device_node *node,
1798 				      const u32 *intspec,
1799 				      unsigned int intsize,
1800 				      unsigned long *out_hwirq,
1801 				      unsigned int *out_type)
1802 {
1803 	const u32 intx = intspec[0];
1804 
1805 	if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1806 		return -EINVAL;
1807 
1808 	*out_hwirq = intx - PCI_INTERRUPT_INTA;
1809 	return 0;
1810 }
1811 
1812 #ifdef CONFIG_PCIEPORTBUS
1813 extern bool pcie_ports_disabled;
1814 extern bool pcie_ports_native;
1815 #else
1816 #define pcie_ports_disabled	true
1817 #define pcie_ports_native	false
1818 #endif
1819 
1820 #define PCIE_LINK_STATE_L0S		BIT(0)
1821 #define PCIE_LINK_STATE_L1		BIT(1)
1822 #define PCIE_LINK_STATE_CLKPM		BIT(2)
1823 #define PCIE_LINK_STATE_L1_1		BIT(3)
1824 #define PCIE_LINK_STATE_L1_2		BIT(4)
1825 #define PCIE_LINK_STATE_L1_1_PCIPM	BIT(5)
1826 #define PCIE_LINK_STATE_L1_2_PCIPM	BIT(6)
1827 #define PCIE_LINK_STATE_ALL		(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |\
1828 					 PCIE_LINK_STATE_CLKPM | PCIE_LINK_STATE_L1_1 |\
1829 					 PCIE_LINK_STATE_L1_2 | PCIE_LINK_STATE_L1_1_PCIPM |\
1830 					 PCIE_LINK_STATE_L1_2_PCIPM)
1831 
1832 #ifdef CONFIG_PCIEASPM
1833 int pci_disable_link_state(struct pci_dev *pdev, int state);
1834 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1835 int pci_enable_link_state(struct pci_dev *pdev, int state);
1836 int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
1837 void pcie_no_aspm(void);
1838 bool pcie_aspm_support_enabled(void);
1839 bool pcie_aspm_enabled(struct pci_dev *pdev);
1840 #else
pci_disable_link_state(struct pci_dev * pdev,int state)1841 static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1842 { return 0; }
pci_disable_link_state_locked(struct pci_dev * pdev,int state)1843 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1844 { return 0; }
pci_enable_link_state(struct pci_dev * pdev,int state)1845 static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
1846 { return 0; }
pci_enable_link_state_locked(struct pci_dev * pdev,int state)1847 static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1848 { return 0; }
pcie_no_aspm(void)1849 static inline void pcie_no_aspm(void) { }
pcie_aspm_support_enabled(void)1850 static inline bool pcie_aspm_support_enabled(void) { return false; }
pcie_aspm_enabled(struct pci_dev * pdev)1851 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1852 #endif
1853 
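/*
 * Illustrative use of the ASPM interface above: a driver whose device
 * cannot tolerate L0s/L1 exit latency may opt out for that device (the
 * pdev pointer is hypothetical):
 *
 *	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
 */
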
1854 #ifdef CONFIG_PCIEAER
1855 bool pci_aer_available(void);
1856 #else
pci_aer_available(void)1857 static inline bool pci_aer_available(void) { return false; }
1858 #endif
1859 
1860 bool pci_ats_disabled(void);
1861 
1862 #ifdef CONFIG_PCIE_PTM
1863 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1864 void pci_disable_ptm(struct pci_dev *dev);
1865 bool pcie_ptm_enabled(struct pci_dev *dev);
1866 #else
pci_enable_ptm(struct pci_dev * dev,u8 * granularity)1867 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
1868 { return -EINVAL; }
pci_disable_ptm(struct pci_dev * dev)1869 static inline void pci_disable_ptm(struct pci_dev *dev) { }
pcie_ptm_enabled(struct pci_dev * dev)1870 static inline bool pcie_ptm_enabled(struct pci_dev *dev)
1871 { return false; }
1872 #endif
1873 
1874 void pci_cfg_access_lock(struct pci_dev *dev);
1875 bool pci_cfg_access_trylock(struct pci_dev *dev);
1876 void pci_cfg_access_unlock(struct pci_dev *dev);
1877 
1878 void pci_dev_lock(struct pci_dev *dev);
1879 int pci_dev_trylock(struct pci_dev *dev);
1880 void pci_dev_unlock(struct pci_dev *dev);
1881 DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
1882 
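/*
 * With the guard defined above, scope-based locking from
 * <linux/cleanup.h> can be used; a rough sketch:
 *
 *	guard(pci_dev)(pdev);	// pci_dev_lock() now, pci_dev_unlock() at scope exit
 */
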
1883 /*
1884  * PCI domain support.  Sometimes called a PCI segment (e.g. by ACPI),
1885  * a PCI domain is defined to be a set of PCI buses which share
1886  * configuration space.
1887  */
1888 #ifdef CONFIG_PCI_DOMAINS
1889 extern int pci_domains_supported;
1890 #else
1891 enum { pci_domains_supported = 0 };
1892 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1893 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1894 #endif /* CONFIG_PCI_DOMAINS */
1895 
1896 /*
1897  * Generic implementation for PCI domain support. If your
1898  * architecture does not need custom management of PCI
1899  * domains then this implementation will be used
1900  */
1901 #ifdef CONFIG_PCI_DOMAINS_GENERIC
pci_domain_nr(struct pci_bus * bus)1902 static inline int pci_domain_nr(struct pci_bus *bus)
1903 {
1904 	return bus->domain_nr;
1905 }
1906 #ifdef CONFIG_ACPI
1907 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1908 #else
acpi_pci_bus_find_domain_nr(struct pci_bus * bus)1909 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1910 { return 0; }
1911 #endif
1912 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1913 void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent);
1914 #endif
1915 
1916 /* Some architectures require additional setup to direct VGA traffic */
1917 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1918 				    unsigned int command_bits, u32 flags);
1919 void pci_register_set_vga_state(arch_set_vga_state_t func);
1920 
1921 static inline int
pci_request_io_regions(struct pci_dev * pdev,const char * name)1922 pci_request_io_regions(struct pci_dev *pdev, const char *name)
1923 {
1924 	return pci_request_selected_regions(pdev,
1925 			    pci_select_bars(pdev, IORESOURCE_IO), name);
1926 }
1927 
1928 static inline void
pci_release_io_regions(struct pci_dev * pdev)1929 pci_release_io_regions(struct pci_dev *pdev)
1930 {
1931 	return pci_release_selected_regions(pdev,
1932 			    pci_select_bars(pdev, IORESOURCE_IO));
1933 }
1934 
1935 static inline int
pci_request_mem_regions(struct pci_dev * pdev,const char * name)1936 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
1937 {
1938 	return pci_request_selected_regions(pdev,
1939 			    pci_select_bars(pdev, IORESOURCE_MEM), name);
1940 }
1941 
1942 static inline void
pci_release_mem_regions(struct pci_dev * pdev)1943 pci_release_mem_regions(struct pci_dev *pdev)
1944 {
1945 	return pci_release_selected_regions(pdev,
1946 			    pci_select_bars(pdev, IORESOURCE_MEM));
1947 }
1948 
1949 #else /* CONFIG_PCI is not enabled */
1950 
pci_set_flags(int flags)1951 static inline void pci_set_flags(int flags) { }
pci_add_flags(int flags)1952 static inline void pci_add_flags(int flags) { }
pci_clear_flags(int flags)1953 static inline void pci_clear_flags(int flags) { }
pci_has_flag(int flag)1954 static inline int pci_has_flag(int flag) { return 0; }
1955 
1956 /*
1957  * If the system does not have PCI, clearly these return errors.  Define
1958  * these as simple inline functions to avoid hair in drivers.
1959  */
1960 #define _PCI_NOP(o, s, t) \
1961 	static inline int pci_##o##_config_##s(struct pci_dev *dev, \
1962 						int where, t val) \
1963 		{ return PCIBIOS_FUNC_NOT_SUPPORTED; }
1964 
1965 #define _PCI_NOP_ALL(o, x)	_PCI_NOP(o, byte, u8 x) \
1966 				_PCI_NOP(o, word, u16 x) \
1967 				_PCI_NOP(o, dword, u32 x)
1968 _PCI_NOP_ALL(read, *)
1969 _PCI_NOP_ALL(write,)
1970 
pci_get_device(unsigned int vendor,unsigned int device,struct pci_dev * from)1971 static inline struct pci_dev *pci_get_device(unsigned int vendor,
1972 					     unsigned int device,
1973 					     struct pci_dev *from)
1974 { return NULL; }
1975 
pci_get_subsys(unsigned int vendor,unsigned int device,unsigned int ss_vendor,unsigned int ss_device,struct pci_dev * from)1976 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
1977 					     unsigned int device,
1978 					     unsigned int ss_vendor,
1979 					     unsigned int ss_device,
1980 					     struct pci_dev *from)
1981 { return NULL; }
1982 
pci_get_class(unsigned int class,struct pci_dev * from)1983 static inline struct pci_dev *pci_get_class(unsigned int class,
1984 					    struct pci_dev *from)
1985 { return NULL; }
1986 
pci_get_base_class(unsigned int class,struct pci_dev * from)1987 static inline struct pci_dev *pci_get_base_class(unsigned int class,
1988 						 struct pci_dev *from)
1989 { return NULL; }
1990 
pci_dev_present(const struct pci_device_id * ids)1991 static inline int pci_dev_present(const struct pci_device_id *ids)
1992 { return 0; }
1993 
1994 #define no_pci_devices()	(1)
1995 #define pci_dev_put(dev)	do { } while (0)
1996 
pci_set_master(struct pci_dev * dev)1997 static inline void pci_set_master(struct pci_dev *dev) { }
pci_clear_master(struct pci_dev * dev)1998 static inline void pci_clear_master(struct pci_dev *dev) { }
pci_enable_device(struct pci_dev * dev)1999 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
pci_disable_device(struct pci_dev * dev)2000 static inline void pci_disable_device(struct pci_dev *dev) { }
pcim_enable_device(struct pci_dev * pdev)2001 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
pci_assign_resource(struct pci_dev * dev,int i)2002 static inline int pci_assign_resource(struct pci_dev *dev, int i)
2003 { return -EBUSY; }
__pci_register_driver(struct pci_driver * drv,struct module * owner,const char * mod_name)2004 static inline int __must_check __pci_register_driver(struct pci_driver *drv,
2005 						     struct module *owner,
2006 						     const char *mod_name)
2007 { return 0; }
pci_register_driver(struct pci_driver * drv)2008 static inline int pci_register_driver(struct pci_driver *drv)
2009 { return 0; }
pci_unregister_driver(struct pci_driver * drv)2010 static inline void pci_unregister_driver(struct pci_driver *drv) { }
pci_find_capability(struct pci_dev * dev,int cap)2011 static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
2012 { return 0; }
pci_find_next_capability(struct pci_dev * dev,u8 post,int cap)2013 static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
2014 					   int cap)
2015 { return 0; }
pci_find_ext_capability(struct pci_dev * dev,int cap)2016 static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
2017 { return 0; }
2018 
pci_get_dsn(struct pci_dev * dev)2019 static inline u64 pci_get_dsn(struct pci_dev *dev)
2020 { return 0; }
2021 
2022 /* Power management related routines */
pci_save_state(struct pci_dev * dev)2023 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
pci_restore_state(struct pci_dev * dev)2024 static inline void pci_restore_state(struct pci_dev *dev) { }
pci_set_power_state(struct pci_dev * dev,pci_power_t state)2025 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
2026 { return 0; }
pci_set_power_state_locked(struct pci_dev * dev,pci_power_t state)2027 static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
2028 { return 0; }
pci_wake_from_d3(struct pci_dev * dev,bool enable)2029 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2030 { return 0; }
pci_choose_state(struct pci_dev * dev,pm_message_t state)2031 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
2032 					   pm_message_t state)
2033 { return PCI_D0; }
pci_enable_wake(struct pci_dev * dev,pci_power_t state,int enable)2034 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
2035 				  int enable)
2036 { return 0; }
2037 
pci_find_resource(struct pci_dev * dev,struct resource * res)2038 static inline struct resource *pci_find_resource(struct pci_dev *dev,
2039 						 struct resource *res)
2040 { return NULL; }
pci_request_regions(struct pci_dev * dev,const char * res_name)2041 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
2042 { return -EIO; }
pci_release_regions(struct pci_dev * dev)2043 static inline void pci_release_regions(struct pci_dev *dev) { }
2044 
pci_register_io_range(struct fwnode_handle * fwnode,phys_addr_t addr,resource_size_t size)2045 static inline int pci_register_io_range(struct fwnode_handle *fwnode,
2046 					phys_addr_t addr, resource_size_t size)
2047 { return -EINVAL; }
2048 
pci_address_to_pio(phys_addr_t addr)2049 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
2050 
pci_find_next_bus(const struct pci_bus * from)2051 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
2052 { return NULL; }
pci_get_slot(struct pci_bus * bus,unsigned int devfn)2053 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
2054 						unsigned int devfn)
2055 { return NULL; }
pci_get_domain_bus_and_slot(int domain,unsigned int bus,unsigned int devfn)2056 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
2057 					unsigned int bus, unsigned int devfn)
2058 { return NULL; }
2059 
pci_domain_nr(struct pci_bus * bus)2060 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
pci_dev_get(struct pci_dev * dev)2061 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
2062 
2063 #define dev_is_pci(d) (false)
2064 #define dev_is_pf(d) (false)
pci_acs_enabled(struct pci_dev * pdev,u16 acs_flags)2065 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2066 { return false; }
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)2067 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
2068 				      struct device_node *node,
2069 				      const u32 *intspec,
2070 				      unsigned int intsize,
2071 				      unsigned long *out_hwirq,
2072 				      unsigned int *out_type)
2073 { return -EINVAL; }
2074 
pci_match_id(const struct pci_device_id * ids,struct pci_dev * dev)2075 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
2076 							 struct pci_dev *dev)
2077 { return NULL; }
pci_ats_disabled(void)2078 static inline bool pci_ats_disabled(void) { return true; }
2079 
pci_irq_vector(struct pci_dev * dev,unsigned int nr)2080 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
2081 {
2082 	return -EINVAL;
2083 }
2084 
2085 static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags,struct irq_affinity * aff_desc)2086 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
2087 			       unsigned int max_vecs, unsigned int flags,
2088 			       struct irq_affinity *aff_desc)
2089 {
2090 	return -ENOSPC;
2091 }
2092 static inline int
pci_alloc_irq_vectors(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags)2093 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
2094 		      unsigned int max_vecs, unsigned int flags)
2095 {
2096 	return -ENOSPC;
2097 }
2098 #endif /* CONFIG_PCI */
2099 
2100 /* Include architecture-dependent settings and functions */
2101 
2102 #include <asm/pci.h>
2103 
2104 /*
2105  * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
2106  * is expected to be an offset within that region.
2107  *
2108  */
2109 int pci_mmap_resource_range(struct pci_dev *dev, int bar,
2110 			    struct vm_area_struct *vma,
2111 			    enum pci_mmap_state mmap_state, int write_combine);
2112 
2113 #ifndef arch_can_pci_mmap_wc
2114 #define arch_can_pci_mmap_wc()		0
2115 #endif
2116 
2117 #ifndef arch_can_pci_mmap_io
2118 #define arch_can_pci_mmap_io()		0
2119 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
2120 #else
2121 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
2122 #endif
2123 
2124 #ifndef pci_root_bus_fwnode
2125 #define pci_root_bus_fwnode(bus)	NULL
2126 #endif
2127 
2128 /*
2129  * These helpers provide future and backwards compatibility
2130  * for accessing commonly used PCI BAR info.
2131  */
2132 #define pci_resource_n(dev, bar)	(&(dev)->resource[(bar)])
2133 #define pci_resource_start(dev, bar)	(pci_resource_n(dev, bar)->start)
2134 #define pci_resource_end(dev, bar)	(pci_resource_n(dev, bar)->end)
2135 #define pci_resource_flags(dev, bar)	(pci_resource_n(dev, bar)->flags)
2136 #define pci_resource_len(dev,bar)					\
2137 	(pci_resource_end((dev), (bar)) ? 				\
2138 	 resource_size(pci_resource_n((dev), (bar))) : 0)
2139 
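/*
 * Typical BAR access sketch built on the helpers above (the BAR number and
 * error handling are illustrative):
 *
 *	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
 *		return -ENODEV;
 *	regs = pci_ioremap_bar(pdev, 0);	// maps pci_resource_len(pdev, 0) bytes
 *	if (!regs)
 *		return -ENOMEM;
 */
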
2140 #define __pci_dev_for_each_res0(dev, res, ...)				  \
2141 	for (unsigned int __b = 0;					  \
2142 	     __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2143 	     __b++)
2144 
2145 #define __pci_dev_for_each_res1(dev, res, __b)				  \
2146 	for (__b = 0;							  \
2147 	     __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2148 	     __b++)
2149 
2150 #define pci_dev_for_each_resource(dev, res, ...)			\
2151 	CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) 	\
2152 		    (dev, res, __VA_ARGS__)
2153 
2154 /*
2155  * Similar to the helpers above, these manipulate per-pci_dev
2156  * driver-specific data.  They are really just a wrapper around
2157  * the generic device structure functions of these calls.
2158  */
pci_get_drvdata(struct pci_dev * pdev)2159 static inline void *pci_get_drvdata(struct pci_dev *pdev)
2160 {
2161 	return dev_get_drvdata(&pdev->dev);
2162 }
2163 
pci_set_drvdata(struct pci_dev * pdev,void * data)2164 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
2165 {
2166 	dev_set_drvdata(&pdev->dev, data);
2167 }
2168 
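/*
 * Common pattern (sketch only; the "foo" state structure is hypothetical):
 * stash driver-private state in probe() and fetch it back later, e.g. in
 * remove():
 *
 *	// probe()
 *	struct foo *foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *	pci_set_drvdata(pdev, foo);
 *
 *	// remove()
 *	struct foo *foo = pci_get_drvdata(pdev);
 */
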
pci_name(const struct pci_dev * pdev)2169 static inline const char *pci_name(const struct pci_dev *pdev)
2170 {
2171 	return dev_name(&pdev->dev);
2172 }
2173 
2174 void pci_resource_to_user(const struct pci_dev *dev, int bar,
2175 			  const struct resource *rsrc,
2176 			  resource_size_t *start, resource_size_t *end);
2177 
2178 /*
2179  * The world is not perfect and supplies us with broken PCI devices.
2180  * For at least some of these bugs we need a workaround, so both
2181  * generic (drivers/pci/quirks.c) and per-architecture code can define
2182  * fixup hooks to be called for particular buggy devices.
2183  */
2184 
2185 struct pci_fixup {
2186 	u16 vendor;			/* Or PCI_ANY_ID */
2187 	u16 device;			/* Or PCI_ANY_ID */
2188 	u32 class;			/* Or PCI_ANY_ID */
2189 	unsigned int class_shift;	/* should be 0, 8, 16 */
2190 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2191 	int hook_offset;
2192 #else
2193 	void (*hook)(struct pci_dev *dev);
2194 #endif
2195 };
2196 
2197 enum pci_fixup_pass {
2198 	pci_fixup_early,	/* Before probing BARs */
2199 	pci_fixup_header,	/* After reading configuration header */
2200 	pci_fixup_final,	/* Final phase of device fixups */
2201 	pci_fixup_enable,	/* pci_enable_device() time */
2202 	pci_fixup_resume,	/* pci_device_resume() */
2203 	pci_fixup_suspend,	/* pci_device_suspend() */
2204 	pci_fixup_resume_early, /* pci_device_resume_early() */
2205 	pci_fixup_suspend_late,	/* pci_device_suspend_late() */
2206 };
2207 
2208 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2209 #define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2210 				    class_shift, hook)			\
2211 	__ADDRESSABLE(hook)						\
2212 	asm(".section "	#sec ", \"a\"				\n"	\
2213 	    ".balign	16					\n"	\
2214 	    ".short "	#vendor ", " #device "			\n"	\
2215 	    ".long "	#class ", " #class_shift "		\n"	\
2216 	    ".long "	#hook " - .				\n"	\
2217 	    ".previous						\n");
2218 
2219 /*
2220  * Clang's LTO may rename static functions in C, but has no way to
2221  * handle such renamings when referenced from inline asm. To work
2222  * around this, create global C stubs for these cases.
2223  */
2224 #ifdef CONFIG_LTO_CLANG
2225 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2226 				  class_shift, hook, stub)		\
2227 	void stub(struct pci_dev *dev);					\
2228 	void stub(struct pci_dev *dev)					\
2229 	{ 								\
2230 		hook(dev); 						\
2231 	}								\
2232 	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2233 				  class_shift, stub)
2234 #else
2235 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2236 				  class_shift, hook, stub)		\
2237 	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2238 				  class_shift, hook)
2239 #endif
2240 
2241 #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2242 				  class_shift, hook)			\
2243 	__DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2244 				  class_shift, hook, __UNIQUE_ID(hook))
2245 #else
2246 /* Anonymous variables would be nice... */
2247 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class,	\
2248 				  class_shift, hook)			\
2249 	static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used	\
2250 	__attribute__((__section__(#section), aligned((sizeof(void *)))))    \
2251 		= { vendor, device, class, class_shift, hook };
2252 #endif
2253 
2254 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class,		\
2255 					 class_shift, hook)		\
2256 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
2257 		hook, vendor, device, class, class_shift, hook)
2258 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class,		\
2259 					 class_shift, hook)		\
2260 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
2261 		hook, vendor, device, class, class_shift, hook)
2262 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class,		\
2263 					 class_shift, hook)		\
2264 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
2265 		hook, vendor, device, class, class_shift, hook)
2266 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class,		\
2267 					 class_shift, hook)		\
2268 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
2269 		hook, vendor, device, class, class_shift, hook)
2270 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class,		\
2271 					 class_shift, hook)		\
2272 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
2273 		resume##hook, vendor, device, class, class_shift, hook)
2274 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class,	\
2275 					 class_shift, hook)		\
2276 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
2277 		resume_early##hook, vendor, device, class, class_shift, hook)
2278 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class,		\
2279 					 class_shift, hook)		\
2280 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
2281 		suspend##hook, vendor, device, class, class_shift, hook)
2282 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class,	\
2283 					 class_shift, hook)		\
2284 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
2285 		suspend_late##hook, vendor, device, class, class_shift, hook)
2286 
2287 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)			\
2288 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
2289 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2290 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook)			\
2291 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
2292 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2293 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)			\
2294 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
2295 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2296 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook)			\
2297 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
2298 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2299 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\
2300 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
2301 		resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
2302 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\
2303 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
2304 		resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
2305 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\
2306 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
2307 		suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
2308 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook)		\
2309 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
2310 		suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
2311 
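/*
 * Example quirk registration (sketch; the vendor ID, device ID and fixup
 * body are made up):
 *
 *	static void quirk_foo_no_msi(struct pci_dev *dev)
 *	{
 *		dev->no_msi = 1;
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FOO, 0x1234, quirk_foo_no_msi);
 */
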
2312 #ifdef CONFIG_PCI_QUIRKS
2313 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
2314 #else
pci_fixup_device(enum pci_fixup_pass pass,struct pci_dev * dev)2315 static inline void pci_fixup_device(enum pci_fixup_pass pass,
2316 				    struct pci_dev *dev) { }
2317 #endif
2318 
2319 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
2320 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
2321 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
2322 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
2323 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
2324 				   const char *name);
2325 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
2326 
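/*
 * Managed variant of device bring-up (sketch; the BAR mask, name and error
 * handling are illustrative).  Everything requested here is released
 * automatically when the driver unbinds:
 *
 *	err = pcim_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pcim_iomap_regions(pdev, BIT(0), "my_drv");
 *	if (err)
 *		return err;
 *	regs = pcim_iomap_table(pdev)[0];
 */
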
2327 extern int pci_pci_problems;
2328 #define PCIPCI_FAIL		1	/* No PCI PCI DMA */
2329 #define PCIPCI_TRITON		2
2330 #define PCIPCI_NATOMA		4
2331 #define PCIPCI_VIAETBF		8
2332 #define PCIPCI_VSFX		16
2333 #define PCIPCI_ALIMAGIK		32	/* Need low latency setting */
2334 #define PCIAGP_FAIL		64	/* No PCI to AGP DMA */
2335 
2336 extern unsigned long pci_cardbus_io_size;
2337 extern unsigned long pci_cardbus_mem_size;
2338 extern u8 pci_dfl_cache_line_size;
2339 extern u8 pci_cache_line_size;
2340 
2341 /* Architecture-specific versions may override these (weak) */
2342 void pcibios_disable_device(struct pci_dev *dev);
2343 void pcibios_set_master(struct pci_dev *dev);
2344 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2345 				 enum pcie_reset_state state);
2346 int pcibios_device_add(struct pci_dev *dev);
2347 void pcibios_release_device(struct pci_dev *dev);
2348 #ifdef CONFIG_PCI
2349 void pcibios_penalize_isa_irq(int irq, int active);
2350 #else
pcibios_penalize_isa_irq(int irq,int active)2351 static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2352 #endif
2353 int pcibios_alloc_irq(struct pci_dev *dev);
2354 void pcibios_free_irq(struct pci_dev *dev);
2355 resource_size_t pcibios_default_alignment(void);
2356 
2357 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
2358 extern int pci_create_resource_files(struct pci_dev *dev);
2359 extern void pci_remove_resource_files(struct pci_dev *dev);
2360 #endif
2361 
2362 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
2363 void __init pci_mmcfg_early_init(void);
2364 void __init pci_mmcfg_late_init(void);
2365 #else
pci_mmcfg_early_init(void)2366 static inline void pci_mmcfg_early_init(void) { }
pci_mmcfg_late_init(void)2367 static inline void pci_mmcfg_late_init(void) { }
2368 #endif
2369 
2370 int pci_ext_cfg_avail(void);
2371 
2372 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2373 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2374 
2375 #ifdef CONFIG_PCI_IOV
2376 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2377 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2378 int pci_iov_vf_id(struct pci_dev *dev);
2379 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
2380 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2381 void pci_disable_sriov(struct pci_dev *dev);
2382 
2383 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
2384 int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2385 void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2386 int pci_num_vf(struct pci_dev *dev);
2387 int pci_vfs_assigned(struct pci_dev *dev);
2388 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2389 int pci_sriov_get_totalvfs(struct pci_dev *dev);
2390 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2391 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2392 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2393 
2394 /* Arch may override these (weak) */
2395 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2396 int pcibios_sriov_disable(struct pci_dev *pdev);
2397 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2398 #else
pci_iov_virtfn_bus(struct pci_dev * dev,int id)2399 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
2400 {
2401 	return -ENOSYS;
2402 }
pci_iov_virtfn_devfn(struct pci_dev * dev,int id)2403 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
2404 {
2405 	return -ENOSYS;
2406 }
2407 
pci_iov_vf_id(struct pci_dev * dev)2408 static inline int pci_iov_vf_id(struct pci_dev *dev)
2409 {
2410 	return -ENOSYS;
2411 }
2412 
pci_iov_get_pf_drvdata(struct pci_dev * dev,struct pci_driver * pf_driver)2413 static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
2414 					   struct pci_driver *pf_driver)
2415 {
2416 	return ERR_PTR(-EINVAL);
2417 }
2418 
pci_enable_sriov(struct pci_dev * dev,int nr_virtfn)2419 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
2420 { return -ENODEV; }
2421 
pci_iov_sysfs_link(struct pci_dev * dev,struct pci_dev * virtfn,int id)2422 static inline int pci_iov_sysfs_link(struct pci_dev *dev,
2423 				     struct pci_dev *virtfn, int id)
2424 {
2425 	return -ENODEV;
2426 }
pci_iov_add_virtfn(struct pci_dev * dev,int id)2427 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2428 {
2429 	return -ENOSYS;
2430 }
pci_iov_remove_virtfn(struct pci_dev * dev,int id)2431 static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
2432 					 int id) { }
pci_disable_sriov(struct pci_dev * dev)2433 static inline void pci_disable_sriov(struct pci_dev *dev) { }
pci_num_vf(struct pci_dev * dev)2434 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
pci_vfs_assigned(struct pci_dev * dev)2435 static inline int pci_vfs_assigned(struct pci_dev *dev)
2436 { return 0; }
pci_sriov_set_totalvfs(struct pci_dev * dev,u16 numvfs)2437 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
2438 { return 0; }
pci_sriov_get_totalvfs(struct pci_dev * dev)2439 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
2440 { return 0; }
2441 #define pci_sriov_configure_simple	NULL
pci_iov_resource_size(struct pci_dev * dev,int resno)2442 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
2443 { return 0; }
pci_vf_drivers_autoprobe(struct pci_dev * dev,bool probe)2444 static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2445 #endif
2446 
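/*
 * Sketch of a PF driver's sriov_configure() callback built on the SR-IOV
 * helpers above (names illustrative, error handling trimmed):
 *
 *	static int foo_sriov_configure(struct pci_dev *dev, int num_vfs)
 *	{
 *		if (!num_vfs) {
 *			pci_disable_sriov(dev);
 *			return 0;
 *		}
 *		return pci_enable_sriov(dev, num_vfs) ?: num_vfs;
 *	}
 *
 * Drivers that need no extra setup can instead point .sriov_configure at
 * pci_sriov_configure_simple.
 */
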
2447 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
2448 void pci_hp_create_module_link(struct pci_slot *pci_slot);
2449 void pci_hp_remove_module_link(struct pci_slot *pci_slot);
2450 #endif
2451 
2452 /**
2453  * pci_pcie_cap - get the saved PCIe capability offset
2454  * @dev: PCI device
2455  *
2456  * The PCIe capability offset is calculated at PCI device initialization
2457  * time and saved in the data structure. This function returns the saved
2458  * offset. Using it instead of pci_find_capability() avoids an
2459  * unnecessary search of PCI configuration space. If you need to
2460  * recalculate the PCIe capability offset from the raw device for some
2461  * reason, use pci_find_capability() instead.
2462  */
pci_pcie_cap(struct pci_dev * dev)2463 static inline int pci_pcie_cap(struct pci_dev *dev)
2464 {
2465 	return dev->pcie_cap;
2466 }
2467 
2468 /**
2469  * pci_is_pcie - check if the PCI device is PCI Express capable
2470  * @dev: PCI device
2471  *
2472  * Returns: true if the PCI device is PCI Express capable, false otherwise.
2473  */
pci_is_pcie(struct pci_dev * dev)2474 static inline bool pci_is_pcie(struct pci_dev *dev)
2475 {
2476 	return pci_pcie_cap(dev);
2477 }
2478 
2479 /**
2480  * pcie_caps_reg - get the PCIe Capabilities Register
2481  * @dev: PCI device
2482  */
pcie_caps_reg(const struct pci_dev * dev)2483 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
2484 {
2485 	return dev->pcie_flags_reg;
2486 }
2487 
2488 /**
2489  * pci_pcie_type - get the PCIe device/port type
2490  * @dev: PCI device
2491  */
pci_pcie_type(const struct pci_dev * dev)2492 static inline int pci_pcie_type(const struct pci_dev *dev)
2493 {
2494 	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
2495 }
2496 
2497 /**
2498  * pcie_find_root_port - Get the PCIe root port device
2499  * @dev: PCI device
2500  *
2501  * Traverse up the parent chain and return the PCIe Root Port PCI Device
2502  * for a given PCI/PCIe Device.
2503  */
pcie_find_root_port(struct pci_dev * dev)2504 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2505 {
2506 	while (dev) {
2507 		if (pci_is_pcie(dev) &&
2508 		    pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2509 			return dev;
2510 		dev = pci_upstream_bridge(dev);
2511 	}
2512 
2513 	return NULL;
2514 }
2515 
pci_dev_is_disconnected(const struct pci_dev * dev)2516 static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
2517 {
2518 	/*
2519 	 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
2520 	 * and read w/o common lock. READ_ONCE() ensures compiler cannot cache
2521 	 * the value (e.g. inside the loop in pci_dev_wait()).
2522 	 */
2523 	return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
2524 }

void pci_request_acs(void);
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
bool pci_acs_path_enabled(struct pci_dev *start,
			  struct pci_dev *end, u16 acs_flags);
int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);

#define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
#define PCI_VPD_LRDT_ID(x)		((x) | PCI_VPD_LRDT)

/* Large Resource Data Type Tag Item Names */
#define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
#define PCI_VPD_LTIN_RO_DATA		0x10	/* Read-Only Data */
#define PCI_VPD_LTIN_RW_DATA		0x11	/* Read-Write Data */

#define PCI_VPD_LRDT_ID_STRING		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
#define PCI_VPD_LRDT_RO_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
#define PCI_VPD_LRDT_RW_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)

#define PCI_VPD_RO_KEYWORD_PARTNO	"PN"
#define PCI_VPD_RO_KEYWORD_SERIALNO	"SN"
#define PCI_VPD_RO_KEYWORD_MFR_ID	"MN"
#define PCI_VPD_RO_KEYWORD_VENDOR0	"V0"
#define PCI_VPD_RO_KEYWORD_CHKSUM	"RV"

/**
 * pci_vpd_alloc - Allocate buffer and read VPD into it
 * @dev: PCI device
 * @size: pointer to field where VPD length is returned
 *
 * Returns pointer to allocated buffer or an ERR_PTR in case of failure
 */
void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);

/**
 * pci_vpd_find_id_string - Locate id string in VPD
 * @buf: Pointer to buffered VPD data
 * @len: The length of the buffer area in which to search
 * @size: Pointer to field where length of id string is returned
 *
 * Returns the index of the id string or -ENOENT if not found.
 */
int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);

/**
 * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
 * @buf: Pointer to buffered VPD data
 * @len: The length of the buffer area in which to search
 * @kw: The keyword to search for
 * @size: Pointer to field where length of found keyword data is returned
 *
 * Returns the index of the information field keyword data or -ENOENT if
 * not found.
 */
int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
				 const char *kw, unsigned int *size);

/**
 * pci_vpd_check_csum - Check VPD checksum
 * @buf: Pointer to buffered VPD data
 * @len: VPD size
 *
 * Returns 1 if VPD has no checksum, otherwise 0 or an errno
 */
int pci_vpd_check_csum(const void *buf, unsigned int len);
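
/*
 * Illustrative usage sketch (not part of this header): read the VPD,
 * locate the read-only serial number keyword, and copy it out.
 * foo_read_serial() is a made-up helper; error handling is abbreviated
 * and the buffer from pci_vpd_alloc() must be kfree()d.
 *
 *	static int foo_read_serial(struct pci_dev *pdev, char *sn, size_t len)
 *	{
 *		unsigned int vpd_len, kw_len;
 *		void *vpd;
 *		int pos, ret = 0;
 *
 *		vpd = pci_vpd_alloc(pdev, &vpd_len);
 *		if (IS_ERR(vpd))
 *			return PTR_ERR(vpd);
 *
 *		pos = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
 *						   PCI_VPD_RO_KEYWORD_SERIALNO,
 *						   &kw_len);
 *		if (pos < 0)
 *			ret = pos;
 *		else
 *			snprintf(sn, len, "%.*s", kw_len, (char *)vpd + pos);
 *
 *		kfree(vpd);
 *		return ret;
 *	}
 */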

/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
bool pci_host_of_has_msi_map(struct device *dev);

/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);

#else	/* CONFIG_OF */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
#endif  /* CONFIG_OF */

static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev *pdev)
{
	return pdev ? pdev->dev.of_node : NULL;
}

static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
{
	return bus ? bus->dev.of_node : NULL;
}
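
/*
 * Illustrative usage sketch (not part of this header): query a firmware
 * property on the device tree node associated with a PCI device, if any.
 * The "foo,external-clock" property name is made up.
 *
 *	static bool foo_has_external_clock(struct pci_dev *pdev)
 *	{
 *		struct device_node *np = pci_device_to_OF_node(pdev);
 *
 *		return np && of_property_read_bool(np, "foo,external-clock");
 *	}
 */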

#ifdef CONFIG_ACPI
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);

void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
bool pci_pr3_present(struct pci_dev *pdev);
#else
static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
#endif

#ifdef CONFIG_EEH
static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
{
	return pdev->dev.archdata.edev;
}
#endif

void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
int pci_for_each_dma_alias(struct pci_dev *pdev,
			   int (*fn)(struct pci_dev *pdev,
				     u16 alias, void *data), void *data);
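
/*
 * Illustrative usage sketch (not part of this header): walk every
 * requester ID (including DMA aliases) an IOMMU may see for a device.
 * foo_report_alias() is a made-up callback; returning a nonzero value
 * from the callback stops the walk.
 *
 *	static int foo_report_alias(struct pci_dev *pdev, u16 alias, void *data)
 *	{
 *		pci_info(pdev, "requester ID %02x:%02x.%d\n",
 *			 PCI_BUS_NUM(alias), PCI_SLOT(alias & 0xff),
 *			 PCI_FUNC(alias & 0xff));
 *		return 0;
 *	}
 *
 *	pci_for_each_dma_alias(pdev, foo_report_alias, NULL);
 */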

/* Helpers for operating on the PCI_DEV_FLAGS_ASSIGNED device flag */
static inline void pci_set_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
}
static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
}
static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
{
	return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
}
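
/*
 * Illustrative usage sketch (not part of this header): a device-assignment
 * driver (VFIO-style) marking a device as handed to a guest while it is
 * open. foo_open()/foo_release() are made-up names.
 *
 *	static void foo_open(struct pci_dev *pdev)
 *	{
 *		pci_set_dev_assigned(pdev);
 *	}
 *
 *	static void foo_release(struct pci_dev *pdev)
 *	{
 *		WARN_ON(!pci_is_dev_assigned(pdev));
 *		pci_clear_dev_assigned(pdev);
 *	}
 */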

/**
 * pci_ari_enabled - query ARI forwarding status
 * @bus: the PCI bus
 *
 * Returns true if ARI forwarding is enabled.
 */
static inline bool pci_ari_enabled(struct pci_bus *bus)
{
	return bus->self && bus->self->ari_enabled;
}

/**
 * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
 * @pdev: PCI device to check
 *
 * Walk upwards from @pdev and check for each encountered bridge if it's part
 * of a Thunderbolt controller.  Reaching the host bridge means @pdev is not
 * Thunderbolt-attached (it is then usually soldered to the mainboard).
 */
static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev;

	if (pdev->is_thunderbolt)
		return true;

	while ((parent = pci_upstream_bridge(parent)))
		if (parent->is_thunderbolt)
			return true;

	return false;
}
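
/*
 * Illustrative usage sketch (not part of this header): a graphics driver
 * might treat an external (Thunderbolt-attached) GPU differently from one
 * soldered to the mainboard. foo_is_external_gpu() is a made-up name.
 *
 *	static bool foo_is_external_gpu(struct pci_dev *pdev)
 *	{
 *		return pci_is_thunderbolt_attached(pdev);
 *	}
 */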

#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif

#include <linux/dma-mapping.h>

#define pci_printk(level, pdev, fmt, arg...) \
	dev_printk(level, &(pdev)->dev, fmt, ##arg)

#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
#define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)

#define pci_notice_ratelimited(pdev, fmt, arg...) \
	dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)

#define pci_info_ratelimited(pdev, fmt, arg...) \
	dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)

#define pci_WARN(pdev, condition, fmt, arg...) \
	WARN(condition, "%s %s: " fmt, \
	     dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)

#define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
	WARN_ONCE(condition, "%s %s: " fmt, \
		  dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
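
/*
 * Illustrative usage sketch (not part of this header): the pci_*() logging
 * helpers prefix messages with the driver name and the device's address,
 * e.g. "foo 0000:01:00.0: enabled 8 MSI-X vectors". Hypothetical call sites:
 *
 *	pci_info(pdev, "enabled %d MSI-X vectors\n", nvec);
 *	pci_warn_once(pdev, "firmware left BAR %d unassigned\n", bar);
 *	pci_WARN(pdev, nvec < 0, "vector allocation failed: %d\n", nvec);
 */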

#endif /* LINUX_PCI_H */