/* SPDX-License-Identifier: GPL-2.0 */
/*
 *	pci.h
 *
 *	PCI defines and function prototypes
 *	Copyright 1994, Drew Eckhardt
 *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
 *
 *	PCI Express ASPM defines and function prototypes
 *	Copyright (c) 2007 Intel Corp.
 *		Zhang Yanmin (yanmin.zhang@intel.com)
 *		Shaohua Li (shaohua.li@intel.com)
 *
 *	For more information, please consult the following manuals (look at
 *	http://www.pcisig.com/ for how to get them):
 *
 *	PCI BIOS Specification
 *	PCI Local Bus Specification
 *	PCI to PCI Bridge Specification
 *	PCI Express Specification
 *	PCI System Design Guide
 */
#ifndef LINUX_PCI_H
#define LINUX_PCI_H
25
26 #include <linux/args.h>
27 #include <linux/mod_devicetable.h>
28
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/ioport.h>
32 #include <linux/list.h>
33 #include <linux/compiler.h>
34 #include <linux/errno.h>
35 #include <linux/kobject.h>
36 #include <linux/atomic.h>
37 #include <linux/device.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/resource_ext.h>
41 #include <linux/msi_api.h>
42 #include <uapi/linux/pci.h>
43
44 #include <linux/pci_ids.h>
45 #include <linux/android_kabi.h>
46
47 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
48 PCI_STATUS_SIG_SYSTEM_ERROR | \
49 PCI_STATUS_REC_MASTER_ABORT | \
50 PCI_STATUS_REC_TARGET_ABORT | \
51 PCI_STATUS_SIG_TARGET_ABORT | \
52 PCI_STATUS_PARITY)
53
54 /* Number of reset methods used in pci_reset_fn_methods array in pci.c */
55 #define PCI_NUM_RESET_METHODS 8
56
57 #define PCI_RESET_PROBE true
58 #define PCI_RESET_DO_RESET false
59
/*
 * The PCI interface treats multi-function devices as independent
 * devices.  The slot/function address of each device is encoded
 * in a single byte as follows:
 *
 *	7:3 = slot
 *	2:0 = function
 *
 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
 * In the interest of not exposing interfaces to user-space unnecessarily,
 * the following kernel-only defines are being added here.
 */
#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
/* Return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
#define PCI_BUS_NUM(x)		(((x) >> 8) & 0xff)
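/*
 * A minimal usage sketch of the devfn/devid helpers above.  The bus,
 * slot and function values are made up; PCI_SLOT()/PCI_FUNC() come from
 * uapi/linux/pci.h.
 *
 *	u8  bus   = 0x3a;
 *	u8  devfn = PCI_DEVFN(0x1f, 3);		// slot 0x1f, function 3 -> 0xfb
 *	u16 devid = PCI_DEVID(bus, devfn);	// 0x3afb
 *
 *	// Round-trip back to the individual fields:
 *	//	PCI_BUS_NUM(devid) == 0x3a
 *	//	PCI_SLOT(devfn)    == 0x1f
 *	//	PCI_FUNC(devfn)    == 0x3
 */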
75
76 /* pci_slot represents a physical slot */
77 struct pci_slot {
78 struct pci_bus *bus; /* Bus this slot is on */
79 struct list_head list; /* Node in list of slots */
80 struct hotplug_slot *hotplug; /* Hotplug info (move here) */
81 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
82 struct kobject kobj;
83 };
84
static inline const char *pci_slot_name(const struct pci_slot *slot)
{
	return kobject_name(&slot->kobj);
}
89
90 /* File state for mmap()s on /proc/bus/pci/X/Y */
91 enum pci_mmap_state {
92 pci_mmap_io,
93 pci_mmap_mem
94 };
95
96 /* For PCI devices, the region numbers are assigned this way: */
97 enum {
98 /* #0-5: standard PCI resources */
99 PCI_STD_RESOURCES,
100 PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,
101
102 /* #6: expansion ROM resource */
103 PCI_ROM_RESOURCE,
104
105 /* Device-specific resources */
106 #ifdef CONFIG_PCI_IOV
107 PCI_IOV_RESOURCES,
108 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
109 #endif
110
111 /* PCI-to-PCI (P2P) bridge windows */
112 #define PCI_BRIDGE_IO_WINDOW (PCI_BRIDGE_RESOURCES + 0)
113 #define PCI_BRIDGE_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 1)
114 #define PCI_BRIDGE_PREF_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 2)
115
116 /* CardBus bridge windows */
117 #define PCI_CB_BRIDGE_IO_0_WINDOW (PCI_BRIDGE_RESOURCES + 0)
118 #define PCI_CB_BRIDGE_IO_1_WINDOW (PCI_BRIDGE_RESOURCES + 1)
119 #define PCI_CB_BRIDGE_MEM_0_WINDOW (PCI_BRIDGE_RESOURCES + 2)
120 #define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3)
121
122 /* Total number of bridge resources for P2P and CardBus */
123 #define PCI_BRIDGE_RESOURCE_NUM 4
124
125 /* Resources assigned to buses behind the bridge */
126 PCI_BRIDGE_RESOURCES,
127 PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
128 PCI_BRIDGE_RESOURCE_NUM - 1,
129
130 /* Total resources associated with a PCI device */
131 PCI_NUM_RESOURCES,
132
133 /* Preserve this for compatibility */
134 DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
135 };
136
137 /**
138 * enum pci_interrupt_pin - PCI INTx interrupt values
139 * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
140 * @PCI_INTERRUPT_INTA: PCI INTA pin
141 * @PCI_INTERRUPT_INTB: PCI INTB pin
142 * @PCI_INTERRUPT_INTC: PCI INTC pin
143 * @PCI_INTERRUPT_INTD: PCI INTD pin
144 *
145 * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
146 * PCI_INTERRUPT_PIN register.
147 */
148 enum pci_interrupt_pin {
149 PCI_INTERRUPT_UNKNOWN,
150 PCI_INTERRUPT_INTA,
151 PCI_INTERRUPT_INTB,
152 PCI_INTERRUPT_INTC,
153 PCI_INTERRUPT_INTD,
154 };
155
156 /* The number of legacy PCI INTx interrupts */
157 #define PCI_NUM_INTX 4
158
/*
 * Reading from a device that doesn't respond typically returns ~0.  A
 * successful read from a device may also return ~0, so you need additional
 * information to reliably identify errors.
 */
#define PCI_ERROR_RESPONSE		(~0ULL)
#define PCI_SET_ERROR_RESPONSE(val)	(*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
#define PCI_POSSIBLE_ERROR(val)		((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
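/*
 * A minimal sketch of the typical use of PCI_POSSIBLE_ERROR() after a
 * config read.  The helper name is made up; in practice the caller often
 * has extra context (a register that can never legitimately read as ~0)
 * to tell a real error apart from valid data.
 *
 *	static int example_check_present(struct pci_dev *pdev)
 *	{
 *		u32 id;
 *
 *		pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);
 *		if (PCI_POSSIBLE_ERROR(id))
 *			return -ENODEV;	// device may have dropped off the bus
 *		return 0;
 *	}
 */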
167
168 /*
169 * pci_power_t values must match the bits in the Capabilities PME_Support
170 * and Control/Status PowerState fields in the Power Management capability.
171 */
172 typedef int __bitwise pci_power_t;
173
174 #define PCI_D0 ((pci_power_t __force) 0)
175 #define PCI_D1 ((pci_power_t __force) 1)
176 #define PCI_D2 ((pci_power_t __force) 2)
177 #define PCI_D3hot ((pci_power_t __force) 3)
178 #define PCI_D3cold ((pci_power_t __force) 4)
179 #define PCI_UNKNOWN ((pci_power_t __force) 5)
180 #define PCI_POWER_ERROR ((pci_power_t __force) -1)
181
182 /* Remember to update this when the list above changes! */
183 extern const char *pci_power_names[];
184
static inline const char *pci_power_name(pci_power_t state)
{
	return pci_power_names[1 + (__force int) state];
}
189
190 /**
191 * typedef pci_channel_state_t
192 *
193 * The pci_channel state describes connectivity between the CPU and
194 * the PCI device. If some PCI bus between here and the PCI device
195 * has crashed or locked up, this info is reflected here.
196 */
197 typedef unsigned int __bitwise pci_channel_state_t;
198
199 enum {
200 /* I/O channel is in normal state */
201 pci_channel_io_normal = (__force pci_channel_state_t) 1,
202
203 /* I/O to channel is blocked */
204 pci_channel_io_frozen = (__force pci_channel_state_t) 2,
205
206 /* PCI card is dead */
207 pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
208 };
209
210 typedef unsigned int __bitwise pcie_reset_state_t;
211
212 enum pcie_reset_state {
213 /* Reset is NOT asserted (Use to deassert reset) */
214 pcie_deassert_reset = (__force pcie_reset_state_t) 1,
215
216 /* Use #PERST to reset PCIe device */
217 pcie_warm_reset = (__force pcie_reset_state_t) 2,
218
219 /* Use PCIe Hot Reset to reset device */
220 pcie_hot_reset = (__force pcie_reset_state_t) 3
221 };
222
223 typedef unsigned short __bitwise pci_dev_flags_t;
224 enum pci_dev_flags {
225 /* INTX_DISABLE in PCI_COMMAND register disables MSI too */
226 PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
227 /* Device configuration is irrevocably lost if disabled into D3 */
228 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
229 /* Provide indication device is assigned by a Virtual Machine Manager */
230 PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
231 /* Flag for quirk use to store if quirk-specific ACS is enabled */
232 PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
233 /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
234 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
235 /* Do not use bus resets for device */
236 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
237 /* Do not use PM reset even if device advertises NoSoftRst- */
238 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
239 /* Get VPD from function 0 VPD */
240 PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
241 /* A non-root bridge where translation occurs, stop alias search here */
242 PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
243 /* Do not use FLR even if device advertises PCI_AF_CAP */
244 PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
245 /* Don't use Relaxed Ordering for TLPs directed at this device */
246 PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
247 /* Device does honor MSI masking despite saying otherwise */
248 PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
249 /* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */
250 PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
251 };
252
253 enum pci_irq_reroute_variant {
254 INTEL_IRQ_REROUTE_VARIANT = 1,
255 MAX_IRQ_REROUTE_VARIANTS = 3
256 };
257
258 typedef unsigned short __bitwise pci_bus_flags_t;
259 enum pci_bus_flags {
260 PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
261 PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
262 PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
263 PCI_BUS_FLAGS_NO_EXTCFG = (__force pci_bus_flags_t) 8,
264 };
265
266 /* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
267 enum pcie_link_width {
268 PCIE_LNK_WIDTH_RESRV = 0x00,
269 PCIE_LNK_X1 = 0x01,
270 PCIE_LNK_X2 = 0x02,
271 PCIE_LNK_X4 = 0x04,
272 PCIE_LNK_X8 = 0x08,
273 PCIE_LNK_X12 = 0x0c,
274 PCIE_LNK_X16 = 0x10,
275 PCIE_LNK_X32 = 0x20,
276 PCIE_LNK_WIDTH_UNKNOWN = 0xff,
277 };
278
279 /* See matching string table in pci_speed_string() */
280 enum pci_bus_speed {
281 PCI_SPEED_33MHz = 0x00,
282 PCI_SPEED_66MHz = 0x01,
283 PCI_SPEED_66MHz_PCIX = 0x02,
284 PCI_SPEED_100MHz_PCIX = 0x03,
285 PCI_SPEED_133MHz_PCIX = 0x04,
286 PCI_SPEED_66MHz_PCIX_ECC = 0x05,
287 PCI_SPEED_100MHz_PCIX_ECC = 0x06,
288 PCI_SPEED_133MHz_PCIX_ECC = 0x07,
289 PCI_SPEED_66MHz_PCIX_266 = 0x09,
290 PCI_SPEED_100MHz_PCIX_266 = 0x0a,
291 PCI_SPEED_133MHz_PCIX_266 = 0x0b,
292 AGP_UNKNOWN = 0x0c,
293 AGP_1X = 0x0d,
294 AGP_2X = 0x0e,
295 AGP_4X = 0x0f,
296 AGP_8X = 0x10,
297 PCI_SPEED_66MHz_PCIX_533 = 0x11,
298 PCI_SPEED_100MHz_PCIX_533 = 0x12,
299 PCI_SPEED_133MHz_PCIX_533 = 0x13,
300 PCIE_SPEED_2_5GT = 0x14,
301 PCIE_SPEED_5_0GT = 0x15,
302 PCIE_SPEED_8_0GT = 0x16,
303 PCIE_SPEED_16_0GT = 0x17,
304 PCIE_SPEED_32_0GT = 0x18,
305 PCIE_SPEED_64_0GT = 0x19,
306 PCI_SPEED_UNKNOWN = 0xff,
307 };
308
309 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
310 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
311
312 struct pci_vpd {
313 struct mutex lock;
314 unsigned int len;
315 u8 cap;
316 };
317
318 struct irq_affinity;
319 struct pcie_link_state;
320 struct pci_sriov;
321 struct pci_p2pdma;
322 struct rcec_ea;
323
324 /* The pci_dev structure describes PCI devices */
325 struct pci_dev {
326 struct list_head bus_list; /* Node in per-bus list */
327 struct pci_bus *bus; /* Bus this device is on */
328 struct pci_bus *subordinate; /* Bus this device bridges to */
329
330 void *sysdata; /* Hook for sys-specific extension */
331 struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */
332 struct pci_slot *slot; /* Physical slot this device is in */
333
334 unsigned int devfn; /* Encoded device & function index */
335 unsigned short vendor;
336 unsigned short device;
337 unsigned short subsystem_vendor;
338 unsigned short subsystem_device;
339 unsigned int class; /* 3 bytes: (base,sub,prog-if) */
340 u8 revision; /* PCI revision, low byte of class word */
341 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
342 #ifdef CONFIG_PCIEAER
343 u16 aer_cap; /* AER capability offset */
344 struct aer_stats *aer_stats; /* AER stats for this device */
345 #endif
346 #ifdef CONFIG_PCIEPORTBUS
347 struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */
348 struct pci_dev *rcec; /* Associated RCEC device */
349 #endif
350 u32 devcap; /* PCIe Device Capabilities */
351 u8 pcie_cap; /* PCIe capability offset */
352 u8 msi_cap; /* MSI capability offset */
353 u8 msix_cap; /* MSI-X capability offset */
354 u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
355 u8 rom_base_reg; /* Config register controlling ROM */
356 u8 pin; /* Interrupt pin this device uses */
357 u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */
358 unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */
359
360 struct pci_driver *driver; /* Driver bound to this device */
361 u64 dma_mask; /* Mask of the bits of bus address this
362 device implements. Normally this is
363 0xffffffff. You only need to change
364 this if your device has broken DMA
365 or supports 64-bit transfers. */
366
367 struct device_dma_parameters dma_parms;
368
369 pci_power_t current_state; /* Current operating state. In ACPI,
370 this is D0-D3, D0 being fully
371 functional, and D3 being off. */
372 u8 pm_cap; /* PM capability offset */
373 unsigned int pme_support:5; /* Bitmask of states from which PME#
374 can be generated */
375 unsigned int pme_poll:1; /* Poll device's PME status bit */
376 unsigned int pinned:1; /* Whether this dev is pinned */
377 unsigned int config_rrs_sv:1; /* Config RRS software visibility */
378 unsigned int imm_ready:1; /* Supports Immediate Readiness */
379 unsigned int d1_support:1; /* Low power state D1 is supported */
380 unsigned int d2_support:1; /* Low power state D2 is supported */
381 unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
382 unsigned int no_d3cold:1; /* D3cold is forbidden */
383 unsigned int bridge_d3:1; /* Allow D3 for bridge */
384 unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
385 unsigned int mmio_always_on:1; /* Disallow turning off io/mem
386 decoding during BAR sizing */
387 unsigned int wakeup_prepared:1;
388 unsigned int skip_bus_pm:1; /* Internal: Skip bus-level PM */
389 unsigned int ignore_hotplug:1; /* Ignore hotplug events */
390 unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
391 controlled exclusively by
392 user sysfs */
393 unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
394 bit manually */
395 unsigned int d3hot_delay; /* D3hot->D0 transition time in ms */
396 unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
397
398 u16 l1ss; /* L1SS Capability pointer */
399 #ifdef CONFIG_PCIEASPM
400 struct pcie_link_state *link_state; /* ASPM link state */
401 unsigned int ltr_path:1; /* Latency Tolerance Reporting
402 supported from root to here */
403 #endif
404 unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */
405 unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */
406
407 pci_channel_state_t error_state; /* Current connectivity state */
408 struct device dev; /* Generic device interface */
409
410 int cfg_size; /* Size of config space */
411
412 /*
413 * Instead of touching interrupt line and base address registers
414 * directly, use the values stored here. They might be different!
415 */
416 unsigned int irq;
417 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
418 struct resource driver_exclusive_resource; /* driver exclusive resource ranges */
419
420 bool match_driver; /* Skip attaching driver */
421
422 unsigned int transparent:1; /* Subtractive decode bridge */
423 unsigned int io_window:1; /* Bridge has I/O window */
424 unsigned int pref_window:1; /* Bridge has pref mem window */
425 unsigned int pref_64_window:1; /* Pref mem window is 64-bit */
426 unsigned int multifunction:1; /* Multi-function device */
427
428 unsigned int is_busmaster:1; /* Is busmaster */
429 unsigned int no_msi:1; /* May not use MSI */
430 unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */
431 unsigned int block_cfg_access:1; /* Config space access blocked */
432 unsigned int broken_parity_status:1; /* Generates false positive parity */
433 unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */
434 unsigned int msi_enabled:1;
435 unsigned int msix_enabled:1;
436 unsigned int ari_enabled:1; /* ARI forwarding */
437 unsigned int ats_enabled:1; /* Address Translation Svc */
438 unsigned int pasid_enabled:1; /* Process Address Space ID */
439 unsigned int pri_enabled:1; /* Page Request Interface */
440 unsigned int is_managed:1; /* Managed via devres */
441 unsigned int is_msi_managed:1; /* MSI release via devres installed */
442 unsigned int needs_freset:1; /* Requires fundamental reset */
443 unsigned int state_saved:1;
444 unsigned int is_physfn:1;
445 unsigned int is_virtfn:1;
446 unsigned int is_hotplug_bridge:1;
447 unsigned int shpc_managed:1; /* SHPC owned by shpchp */
448 unsigned int is_thunderbolt:1; /* Thunderbolt controller */
	/*
	 * Devices marked with requires_dma_protection are those that can
	 * potentially mount DMA attacks and similar.  They are typically,
	 * but not exclusively, connected through external ports such as
	 * Thunderbolt.  When an IOMMU is enabled they should get full
	 * mappings to make sure they cannot access arbitrary memory.
	 */
	unsigned int	requires_dma_protection:1;

	/*
	 * Info from the platform, e.g., ACPI or device tree, may mark a
	 * device as "external-facing".  An external-facing device is
	 * itself internal but devices downstream from it are external.
	 */
	unsigned int	external_facing:1;
464 unsigned int broken_intx_masking:1; /* INTx masking can't be used */
465 unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
466 unsigned int irq_managed:1;
467 unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
468 unsigned int is_probed:1; /* Device probing in progress */
469 unsigned int link_active_reporting:1;/* Device capable of reporting link active */
470 unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
471 unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
472 unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */
473 unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? */
474 pci_dev_flags_t dev_flags;
475 atomic_t enable_cnt; /* pci_enable_device has been called */
476
477 spinlock_t pcie_cap_lock; /* Protects RMW ops in capability accessors */
478 u32 saved_config_space[16]; /* Config space saved at suspend time */
479 struct hlist_head saved_cap_space;
480 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
481 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
482
483 #ifdef CONFIG_HOTPLUG_PCI_PCIE
484 unsigned int broken_cmd_compl:1; /* No compl for some cmds */
485 #endif
486 #ifdef CONFIG_PCIE_PTM
487 u16 ptm_cap; /* PTM Capability */
488 unsigned int ptm_root:1;
489 unsigned int ptm_enabled:1;
490 u8 ptm_granularity;
491 #endif
492 #ifdef CONFIG_PCI_MSI
493 void __iomem *msix_base;
494 raw_spinlock_t msi_lock;
495 #endif
496 struct pci_vpd vpd;
497 #ifdef CONFIG_PCIE_DPC
498 u16 dpc_cap;
499 unsigned int dpc_rp_extensions:1;
500 u8 dpc_rp_log_size;
501 #endif
502 #ifdef CONFIG_PCI_ATS
503 union {
504 struct pci_sriov *sriov; /* PF: SR-IOV info */
505 struct pci_dev *physfn; /* VF: related PF */
506 };
507 u16 ats_cap; /* ATS Capability offset */
508 u8 ats_stu; /* ATS Smallest Translation Unit */
509 #endif
510 #ifdef CONFIG_PCI_PRI
511 u16 pri_cap; /* PRI Capability offset */
512 u32 pri_reqs_alloc; /* Number of PRI requests allocated */
513 unsigned int pasid_required:1; /* PRG Response PASID Required */
514 #endif
515 #ifdef CONFIG_PCI_PASID
516 u16 pasid_cap; /* PASID Capability offset */
517 u16 pasid_features;
518 #endif
519 #ifdef CONFIG_PCI_P2PDMA
520 struct pci_p2pdma __rcu *p2pdma;
521 #endif
522 #ifdef CONFIG_PCI_DOE
523 struct xarray doe_mbs; /* Data Object Exchange mailboxes */
524 #endif
525 #ifdef CONFIG_PCI_NPEM
526 struct npem *npem; /* Native PCIe Enclosure Management */
527 #endif
528 u16 acs_cap; /* ACS Capability offset */
529 phys_addr_t rom; /* Physical address if not from BAR */
530 size_t romlen; /* Length if not from BAR */
531 /*
532 * Driver name to force a match. Do not set directly, because core
533 * frees it. Use driver_set_override() to set or clear it.
534 */
535 const char *driver_override;
536
537 unsigned long priv_flags; /* Private flags for the PCI driver */
538
539 /* These methods index pci_reset_fn_methods[] */
540 u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
541
542 ANDROID_KABI_RESERVE(1);
543 ANDROID_KABI_RESERVE(2);
544 ANDROID_KABI_RESERVE(3);
545 ANDROID_KABI_RESERVE(4);
546 };
547
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	if (dev->is_virtfn)
		dev = dev->physfn;
#endif
	return dev;
}
556
557 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
558
559 #define to_pci_dev(n) container_of(n, struct pci_dev, dev)
560 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
561
static inline int pci_channel_offline(struct pci_dev *pdev)
{
	return (pdev->error_state != pci_channel_io_normal);
}
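/*
 * A minimal sketch of how drivers commonly use pci_channel_offline():
 * bail out of MMIO polling loops once the channel is frozen or has
 * permanently failed (e.g. after an AER/EEH event).  STATUS_REG and
 * STATUS_READY are made-up device registers for the example.
 *
 *	while (!(readl(regs + STATUS_REG) & STATUS_READY)) {
 *		if (pci_channel_offline(pdev))
 *			return -EIO;	// give up; error recovery takes over
 *		udelay(10);
 *	}
 */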
566
/*
 * In the ACPI spec, the PCI Segment Group number for each PCI host bridge is
 * limited to a 16-bit value, so (int)-1 is not a valid PCI domain number.  It
 * can therefore be used as a sentinel value indicating that ->domain_nr has
 * not been set by the driver (CONFIG_PCI_DOMAINS_GENERIC=y arches set it with
 * pci_bus_find_domain_nr()).
 */
#define PCI_DOMAIN_NR_NOT_SET (-1)
576
577 struct pci_host_bridge {
578 struct device dev;
579 struct pci_bus *bus; /* Root bus */
580 struct pci_ops *ops;
581 struct pci_ops *child_ops;
582 void *sysdata;
583 int busnr;
584 int domain_nr;
585 struct list_head windows; /* resource_entry */
586 struct list_head dma_ranges; /* dma ranges resource list */
587 u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
588 int (*map_irq)(const struct pci_dev *, u8, u8);
589 void (*release_fn)(struct pci_host_bridge *);
590 void *release_data;
591 unsigned int ignore_reset_delay:1; /* For entire hierarchy */
592 unsigned int no_ext_tags:1; /* No Extended Tags */
593 unsigned int no_inc_mrrs:1; /* No Increase MRRS */
594 unsigned int native_aer:1; /* OS may use PCIe AER */
595 unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
596 unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
597 unsigned int native_pme:1; /* OS may use PCIe PME */
598 unsigned int native_ltr:1; /* OS may use PCIe LTR */
599 unsigned int native_dpc:1; /* OS may use PCIe DPC */
600 unsigned int native_cxl_error:1; /* OS may use CXL RAS/Events */
601 unsigned int preserve_config:1; /* Preserve FW resource setup */
602 unsigned int size_windows:1; /* Enable root bus sizing */
603 unsigned int msi_domain:1; /* Bridge wants MSI domain */
604
605 /* Resource alignment requirements */
606 resource_size_t (*align_resource)(struct pci_dev *dev,
607 const struct resource *res,
608 resource_size_t start,
609 resource_size_t size,
610 resource_size_t align);
611
612 ANDROID_KABI_RESERVE(1);
613 ANDROID_KABI_RESERVE(2);
614
615 unsigned long private[] ____cacheline_aligned;
616 };
617
618 #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
619
static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
{
	return (void *)bridge->private;
}
624
static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
{
	return container_of(priv, struct pci_host_bridge, private);
}
629
630 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
631 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
632 size_t priv);
633 void pci_free_host_bridge(struct pci_host_bridge *bridge);
634 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
635
636 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
637 void (*release_fn)(struct pci_host_bridge *),
638 void *release_data);
639
640 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
641
/*
 * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
 * to P2P or CardBus bridge windows) go in a table.  Additional ones (for
 * buses below host bridges or subtractive decode bridges) go in the list.
 * Use pci_bus_for_each_resource() to iterate through all the resources.
 */

/*
 * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
 * and there's no way to program the bridge with the details of the window.
 * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
 * decode bit set, because they are explicit and can be programmed with _SRS.
 */
#define PCI_SUBTRACTIVE_DECODE	0x1
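/*
 * A minimal sketch of walking the windows routed to a bus with
 * pci_bus_for_each_resource() (provided later in this header; note that
 * older kernels use a variant taking an extra index argument):
 *
 *	struct resource *res;
 *
 *	pci_bus_for_each_resource(bus, res) {
 *		if (!res || !(res->flags & IORESOURCE_MEM))
 *			continue;
 *		dev_info(&bus->dev, "MEM window %pR\n", res);
 *	}
 */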
656
657 struct pci_bus_resource {
658 struct list_head list;
659 struct resource *res;
660 unsigned int flags;
661 };
662
663 #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
664
665 struct pci_bus {
666 struct list_head node; /* Node in list of buses */
667 struct pci_bus *parent; /* Parent bus this bridge is on */
668 struct list_head children; /* List of child buses */
669 struct list_head devices; /* List of devices on this bus */
670 struct pci_dev *self; /* Bridge device as seen by parent */
671 struct list_head slots; /* List of slots on this bus;
672 protected by pci_slot_mutex */
673 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
674 struct list_head resources; /* Address space routed to this bus */
675 struct resource busn_res; /* Bus numbers routed to this bus */
676
677 struct pci_ops *ops; /* Configuration access functions */
678 void *sysdata; /* Hook for sys-specific extension */
679 struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */
680
681 unsigned char number; /* Bus number */
682 unsigned char primary; /* Number of primary bridge */
683 unsigned char max_bus_speed; /* enum pci_bus_speed */
684 unsigned char cur_bus_speed; /* enum pci_bus_speed */
685 #ifdef CONFIG_PCI_DOMAINS_GENERIC
686 int domain_nr;
687 #endif
688
689 char name[48];
690
691 unsigned short bridge_ctl; /* Manage NO_ISA/FBB/et al behaviors */
692 pci_bus_flags_t bus_flags; /* Inherited by child buses */
693 struct device *bridge;
694 struct device dev;
695 struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
696 struct bin_attribute *legacy_mem; /* Legacy mem */
697 unsigned int is_added:1;
698 unsigned int unsafe_warn:1; /* warned about RW1C config write */
699
700 ANDROID_KABI_RESERVE(1);
701 ANDROID_KABI_RESERVE(2);
702 ANDROID_KABI_RESERVE(3);
703 ANDROID_KABI_RESERVE(4);
704 };
705
706 #define to_pci_bus(n) container_of(n, struct pci_bus, dev)
707
static inline u16 pci_dev_id(struct pci_dev *dev)
{
	return PCI_DEVID(dev->bus->number, dev->devfn);
}
712
/*
 * Returns true if the PCI bus is root (behind a host-to-PCI bridge),
 * false otherwise.
 *
 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
 * This is incorrect because "virtual" buses added for SR-IOV (via
 * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
 */
static inline bool pci_is_root_bus(struct pci_bus *pbus)
{
	return !(pbus->parent);
}
725
/**
 * pci_is_bridge - check if the PCI device is a bridge
 * @dev: PCI device
 *
 * Return true if the PCI device is a bridge, whether or not it has a
 * subordinate bus.
 */
static inline bool pci_is_bridge(struct pci_dev *dev)
{
	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
	       dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
}
738
/**
 * pci_is_vga - check if the PCI device is a VGA device
 * @pdev: PCI device
 *
 * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
 * VGA Base Class and Sub-Classes:
 *
 *   03 00  PCI_CLASS_DISPLAY_VGA      VGA-compatible or 8514-compatible
 *   00 01  PCI_CLASS_NOT_DEFINED_VGA  VGA-compatible (before Class Code)
 *
 * Return true if the PCI device is a VGA device and uses the legacy VGA
 * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
 * aliases).
 */
static inline bool pci_is_vga(struct pci_dev *pdev)
{
	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
		return true;

	if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
		return true;

	return false;
}
763
#define for_each_pci_bridge(dev, bus)				\
	list_for_each_entry(dev, &bus->devices, bus_list)	\
		if (!pci_is_bridge(dev)) {} else

static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
{
	dev = pci_physfn(dev);
	if (pci_is_root_bus(dev->bus))
		return NULL;

	return dev->bus->self;
}
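/*
 * A minimal sketch of walking from a device up to the bridge that sits
 * directly below the root bus, using the helpers above.  The function
 * name is a placeholder.
 *
 *	static struct pci_dev *example_topmost_bridge(struct pci_dev *dev)
 *	{
 *		struct pci_dev *bridge = pci_upstream_bridge(dev);
 *
 *		while (bridge && pci_upstream_bridge(bridge))
 *			bridge = pci_upstream_bridge(bridge);
 *
 *		return bridge;	// NULL if @dev already sits on a root bus
 *	}
 */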
776
#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
	return pci_dev->msi_enabled || pci_dev->msix_enabled;
}
#else
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
#endif
785
/* Error values that may be returned by PCI functions */
#define PCIBIOS_SUCCESSFUL		0x00
#define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
#define PCIBIOS_BAD_VENDOR_ID		0x83
#define PCIBIOS_DEVICE_NOT_FOUND	0x86
#define PCIBIOS_BAD_REGISTER_NUMBER	0x87
#define PCIBIOS_SET_FAILED		0x88
#define PCIBIOS_BUFFER_TOO_SMALL	0x89

/* Translate above to generic errno for passing back through non-PCI code */
static inline int pcibios_err_to_errno(int err)
{
	if (err <= PCIBIOS_SUCCESSFUL)
		return err; /* Assume already errno */

	switch (err) {
	case PCIBIOS_FUNC_NOT_SUPPORTED:
		return -ENOENT;
	case PCIBIOS_BAD_VENDOR_ID:
		return -ENOTTY;
	case PCIBIOS_DEVICE_NOT_FOUND:
		return -ENODEV;
	case PCIBIOS_BAD_REGISTER_NUMBER:
		return -EFAULT;
	case PCIBIOS_SET_FAILED:
		return -EIO;
	case PCIBIOS_BUFFER_TOO_SMALL:
		return -ENOSPC;
	}

	return -ERANGE;
}
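/*
 * A minimal sketch of the intended use: config accessors return PCIBIOS_*
 * codes, which are converted to a normal errno before being handed back
 * to non-PCI code.  The helper name is a placeholder.
 *
 *	static int example_read_subsys(struct pci_dev *pdev, u16 *subsys)
 *	{
 *		int ret;
 *
 *		ret = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, subsys);
 *		if (ret)
 *			return pcibios_err_to_errno(ret);
 *		return 0;
 *	}
 */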
818
819 /* Low-level architecture-dependent routines */
820
struct pci_ops {
	int (*add_bus)(struct pci_bus *bus);
	void (*remove_bus)(struct pci_bus *bus);
	void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);

	ANDROID_KABI_RESERVE(1);
};
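/*
 * A minimal sketch of a pci_ops for a host controller that memory-maps its
 * config space (ECAM-style): it only implements ->map_bus() and reuses the
 * generic accessors declared later in this header.  The foo_* names and
 * the ecam_base field are made up for the example.
 *
 *	static void __iomem *foo_map_bus(struct pci_bus *bus,
 *					 unsigned int devfn, int where)
 *	{
 *		struct foo_pcie *foo = bus->sysdata;
 *
 *		return foo->ecam_base +
 *		       (PCI_DEVID(bus->number, devfn) << 12) + where;
 *	}
 *
 *	static struct pci_ops foo_pci_ops = {
 *		.map_bus = foo_map_bus,
 *		.read    = pci_generic_config_read,
 *		.write   = pci_generic_config_write,
 *	};
 */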
830
831 /*
832 * ACPI needs to be able to access PCI config space before we've done a
833 * PCI bus scan and created pci_bus structures.
834 */
835 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
836 int reg, int len, u32 *val);
837 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
838 int reg, int len, u32 val);
839
840 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
841 typedef u64 pci_bus_addr_t;
842 #else
843 typedef u32 pci_bus_addr_t;
844 #endif
845
846 struct pci_bus_region {
847 pci_bus_addr_t start;
848 pci_bus_addr_t end;
849 };
850
851 struct pci_dynids {
852 spinlock_t lock; /* Protects list, index */
853 struct list_head list; /* For IDs added at runtime */
854 };
855
856
857 /*
858 * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
859 * a set of callbacks in struct pci_error_handlers, that device driver
860 * will be notified of PCI bus errors, and will be driven to recovery
861 * when an error occurs.
862 */
863
864 typedef unsigned int __bitwise pci_ers_result_t;
865
866 enum pci_ers_result {
867 /* No result/none/not supported in device driver */
868 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
869
870 /* Device driver can recover without slot reset */
871 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
872
873 /* Device driver wants slot to be reset */
874 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
875
876 /* Device has completely failed, is unrecoverable */
877 PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
878
879 /* Device driver is fully recovered and operational */
880 PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
881
882 /* No AER capabilities registered for the driver */
883 PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
884 };
885
886 /* PCI bus error event callbacks */
887 struct pci_error_handlers {
888 /* PCI bus error detected on this device */
889 pci_ers_result_t (*error_detected)(struct pci_dev *dev,
890 pci_channel_state_t error);
891
892 /* MMIO has been re-enabled, but not DMA */
893 pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
894
895 /* PCI slot has been reset */
896 pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
897
898 /* PCI function reset prepare or completed */
899 void (*reset_prepare)(struct pci_dev *dev);
900 void (*reset_done)(struct pci_dev *dev);
901
902 /* Device driver may resume normal operations */
903 void (*resume)(struct pci_dev *dev);
904
905 /* Allow device driver to record more details of a correctable error */
906 void (*cor_error_detected)(struct pci_dev *dev);
907
908 ANDROID_KABI_RESERVE(1);
909 };
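/*
 * A minimal sketch of a driver's error handlers.  Real drivers typically
 * quiesce I/O in error_detected(), re-initialize the device in
 * slot_reset() and restart I/O in resume(); see
 * Documentation/PCI/pci-error-recovery.rst.  The foo_* callbacks are
 * placeholders.
 *
 *	static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
 *						   pci_channel_state_t state)
 *	{
 *		if (state == pci_channel_io_perm_failure)
 *			return PCI_ERS_RESULT_DISCONNECT;
 *		return PCI_ERS_RESULT_NEED_RESET;
 *	}
 *
 *	static const struct pci_error_handlers foo_err_handler = {
 *		.error_detected = foo_error_detected,
 *		.slot_reset     = foo_slot_reset,
 *		.resume         = foo_resume,
 *	};
 */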
910
911
912 struct module;
913
914 /**
915 * struct pci_driver - PCI driver structure
916 * @name: Driver name.
917 * @id_table: Pointer to table of device IDs the driver is
918 * interested in. Most drivers should export this
919 * table using MODULE_DEVICE_TABLE(pci,...).
920 * @probe: This probing function gets called (during execution
921 * of pci_register_driver() for already existing
922 * devices or later if a new device gets inserted) for
923 * all PCI devices which match the ID table and are not
924 * "owned" by the other drivers yet. This function gets
925 * passed a "struct pci_dev \*" for each device whose
926 * entry in the ID table matches the device. The probe
927 * function returns zero when the driver chooses to
928 * take "ownership" of the device or an error code
929 * (negative number) otherwise.
930 * The probe function always gets called from process
931 * context, so it can sleep.
932 * @remove: The remove() function gets called whenever a device
933 * being handled by this driver is removed (either during
934 * deregistration of the driver or when it's manually
935 * pulled out of a hot-pluggable slot).
936 * The remove function always gets called from process
937 * context, so it can sleep.
938 * @suspend: Put device into low power state.
939 * @resume: Wake device from low power state.
940 * (Please see Documentation/power/pci.rst for descriptions
941 * of PCI Power Management and the related functions.)
942 * @shutdown: Hook into reboot_notifier_list (kernel/sys.c).
943 * Intended to stop any idling DMA operations.
944 * Useful for enabling wake-on-lan (NIC) or changing
945 * the power state of a device before reboot.
946 * e.g. drivers/net/e100.c.
947 * @sriov_configure: Optional driver callback to allow configuration of
948 * number of VFs to enable via sysfs "sriov_numvfs" file.
949 * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
950 * vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
951 * This will change MSI-X Table Size in the VF Message Control
952 * registers.
953 * @sriov_get_vf_total_msix: PF driver callback to get the total number of
954 * MSI-X vectors available for distribution to the VFs.
955 * @err_handler: See Documentation/PCI/pci-error-recovery.rst
956 * @groups: Sysfs attribute groups.
957 * @dev_groups: Attributes attached to the device that will be
958 * created once it is bound to the driver.
959 * @driver: Driver model structure.
960 * @dynids: List of dynamically added device IDs.
961 * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
962 * For most device drivers, no need to care about this flag
963 * as long as all DMAs are handled through the kernel DMA API.
964 * For some special ones, for example VFIO drivers, they know
965 * how to manage the DMA themselves and set this flag so that
966 * the IOMMU layer will allow them to setup and manage their
967 * own I/O address space.
968 */
969 struct pci_driver {
970 const char *name;
971 const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */
972 int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
973 void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
974 int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
975 int (*resume)(struct pci_dev *dev); /* Device woken up */
976 void (*shutdown)(struct pci_dev *dev);
977 int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
978 int (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
979 u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf);
980 const struct pci_error_handlers *err_handler;
981 const struct attribute_group **groups;
982 const struct attribute_group **dev_groups;
983 struct device_driver driver;
984 struct pci_dynids dynids;
985 bool driver_managed_dma;
986
987 ANDROID_KABI_RESERVE(1);
988 ANDROID_KABI_RESERVE(2);
989 ANDROID_KABI_RESERVE(3);
990 ANDROID_KABI_RESERVE(4);
991 };
992
993 #define to_pci_driver(__drv) \
994 ( __drv ? container_of_const(__drv, struct pci_driver, driver) : NULL )
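/*
 * A minimal sketch of the usual shape of a PCI driver built around this
 * structure.  The IDs and foo_* callbacks are made up; PCI_DEVICE() is
 * defined just below, and module_pci_driver() later in this header.
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, foo_ids);
 *
 *	static struct pci_driver foo_driver = {
 *		.name     = "foo",
 *		.id_table = foo_ids,
 *		.probe    = foo_probe,
 *		.remove   = foo_remove,
 *	};
 *	module_pci_driver(foo_driver);
 */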
995
996 /**
997 * PCI_DEVICE - macro used to describe a specific PCI device
998 * @vend: the 16 bit PCI Vendor ID
999 * @dev: the 16 bit PCI Device ID
1000 *
1001 * This macro is used to create a struct pci_device_id that matches a
1002 * specific device. The subvendor and subdevice fields will be set to
1003 * PCI_ANY_ID.
1004 */
1005 #define PCI_DEVICE(vend,dev) \
1006 .vendor = (vend), .device = (dev), \
1007 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1008
1009 /**
1010 * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
1011 * override_only flags.
1012 * @vend: the 16 bit PCI Vendor ID
1013 * @dev: the 16 bit PCI Device ID
1014 * @driver_override: the 32 bit PCI Device override_only
1015 *
1016 * This macro is used to create a struct pci_device_id that matches only a
1017 * driver_override device. The subvendor and subdevice fields will be set to
1018 * PCI_ANY_ID.
1019 */
1020 #define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
1021 .vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
1022 .subdevice = PCI_ANY_ID, .override_only = (driver_override)
1023
1024 /**
1025 * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
1026 * "driver_override" PCI device.
1027 * @vend: the 16 bit PCI Vendor ID
1028 * @dev: the 16 bit PCI Device ID
1029 *
1030 * This macro is used to create a struct pci_device_id that matches a
1031 * specific device. The subvendor and subdevice fields will be set to
1032 * PCI_ANY_ID and the driver_override will be set to
1033 * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
1034 */
1035 #define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
1036 PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
1037
1038 /**
1039 * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
1040 * @vend: the 16 bit PCI Vendor ID
1041 * @dev: the 16 bit PCI Device ID
1042 * @subvend: the 16 bit PCI Subvendor ID
1043 * @subdev: the 16 bit PCI Subdevice ID
1044 *
1045 * This macro is used to create a struct pci_device_id that matches a
1046 * specific device with subsystem information.
1047 */
1048 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
1049 .vendor = (vend), .device = (dev), \
1050 .subvendor = (subvend), .subdevice = (subdev)
1051
1052 /**
1053 * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
1054 * @dev_class: the class, subclass, prog-if triple for this device
1055 * @dev_class_mask: the class mask for this device
1056 *
1057 * This macro is used to create a struct pci_device_id that matches a
1058 * specific PCI class. The vendor, device, subvendor, and subdevice
1059 * fields will be set to PCI_ANY_ID.
1060 */
1061 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
1062 .class = (dev_class), .class_mask = (dev_class_mask), \
1063 .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
1064 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1065
1066 /**
1067 * PCI_VDEVICE - macro used to describe a specific PCI device in short form
1068 * @vend: the vendor name
1069 * @dev: the 16 bit PCI Device ID
1070 *
1071 * This macro is used to create a struct pci_device_id that matches a
1072 * specific PCI device. The subvendor, and subdevice fields will be set
1073 * to PCI_ANY_ID. The macro allows the next field to follow as the device
1074 * private data.
1075 */
1076 #define PCI_VDEVICE(vend, dev) \
1077 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1078 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
1079
1080 /**
1081 * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
1082 * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
1083 * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
1084 * @data: the driver data to be filled
1085 *
1086 * This macro is used to create a struct pci_device_id that matches a
1087 * specific PCI device. The subvendor, and subdevice fields will be set
1088 * to PCI_ANY_ID.
1089 */
1090 #define PCI_DEVICE_DATA(vend, dev, data) \
1091 .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
1092 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
1093 .driver_data = (kernel_ulong_t)(data)
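/*
 * A small sketch showing the ID-table macros above in use.  The device
 * IDs and driver data are made up; only the vendor/class constants are
 * real ones from pci_ids.h and pci_regs.h.
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },
 *		{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 *		{ PCI_VDEVICE(REDHAT, 0x0005), .driver_data = 42 },
 *		{ }
 *	};
 */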
1094
1095 enum {
1096 PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */
1097 PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */
1098 PCI_PROBE_ONLY = 0x00000004, /* Use existing setup */
1099 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* Don't do ISA alignment */
1100 PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* Enable domains in /proc */
1101 PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
1102 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
1103 };
1104
#define PCI_IRQ_INTX		(1 << 0) /* Allow INTx interrupts */
#define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
#define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */
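/*
 * A minimal sketch: the PCI_IRQ_* flags are usually passed to
 * pci_alloc_irq_vectors() (declared later in this header) so the core can
 * pick the best interrupt type the device and platform support.  The
 * vector counts below are arbitrary.
 *
 *	int nvec;
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
 *				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_INTX);
 *	if (nvec < 0)
 *		return nvec;
 *	// pci_irq_vector(pdev, 0..nvec-1) then yields the Linux IRQ numbers
 */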
1109
1110 /* These external functions are only available when PCI support is enabled */
1111 #ifdef CONFIG_PCI
1112
1113 extern unsigned int pci_flags;
1114
static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }
1119
1120 void pcie_bus_configure_settings(struct pci_bus *bus);
1121
1122 enum pcie_bus_config_types {
1123 PCIE_BUS_TUNE_OFF, /* Don't touch MPS at all */
1124 PCIE_BUS_DEFAULT, /* Ensure MPS matches upstream bridge */
1125 PCIE_BUS_SAFE, /* Use largest MPS boot-time devices support */
1126 PCIE_BUS_PERFORMANCE, /* Use MPS and MRRS for best performance */
1127 PCIE_BUS_PEER2PEER, /* Set MPS = 128 for all devices */
1128 };
1129
1130 extern enum pcie_bus_config_types pcie_bus_config;
1131
1132 extern const struct bus_type pci_bus_type;
1133
/* Do NOT directly access these two variables, unless you are arch-specific
 * PCI code, or PCI core code. */
extern struct list_head pci_root_buses;	/* List of all known PCI buses */
/* Some device drivers need to know if PCI is initialized */
int no_pci_devices(void);
1139
1140 void pcibios_resource_survey_bus(struct pci_bus *bus);
1141 void pcibios_bus_add_device(struct pci_dev *pdev);
1142 void pcibios_add_bus(struct pci_bus *bus);
1143 void pcibios_remove_bus(struct pci_bus *bus);
1144 void pcibios_fixup_bus(struct pci_bus *);
1145 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
1146 /* Architecture-specific versions may override this (weak) */
1147 char *pcibios_setup(char *str);
1148
1149 /* Used only when drivers/pci/setup.c is used */
1150 resource_size_t pcibios_align_resource(void *, const struct resource *,
1151 resource_size_t,
1152 resource_size_t);
1153
1154 /* Weak but can be overridden by arch */
1155 void pci_fixup_cardbus(struct pci_bus *);
1156
1157 /* Generic PCI functions used internally */
1158
1159 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
1160 struct resource *res);
1161 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
1162 struct pci_bus_region *region);
1163 void pcibios_scan_specific_bus(int busn);
1164 struct pci_bus *pci_find_bus(int domain, int busnr);
1165 void pci_bus_add_devices(const struct pci_bus *bus);
1166 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
1167 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1168 struct pci_ops *ops, void *sysdata,
1169 struct list_head *resources);
1170 int pci_host_probe(struct pci_host_bridge *bridge);
1171 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
1172 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
1173 void pci_bus_release_busn_res(struct pci_bus *b);
1174 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1175 struct pci_ops *ops, void *sysdata,
1176 struct list_head *resources);
1177 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
1178 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
1179 int busnr);
1180 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1181 const char *name,
1182 struct hotplug_slot *hotplug);
1183 void pci_destroy_slot(struct pci_slot *slot);
1184 #ifdef CONFIG_SYSFS
1185 void pci_dev_assign_slot(struct pci_dev *dev);
1186 #else
static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
1188 #endif
1189 int pci_scan_slot(struct pci_bus *bus, int devfn);
1190 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
1191 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
1192 unsigned int pci_scan_child_bus(struct pci_bus *bus);
1193 void pci_bus_add_device(struct pci_dev *dev);
1194 void pci_read_bridge_bases(struct pci_bus *child);
1195 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
1196 struct resource *res);
1197 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
1198 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
1199 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
1200 struct pci_dev *pci_dev_get(struct pci_dev *dev);
1201 void pci_dev_put(struct pci_dev *dev);
1202 DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
1203 void pci_remove_bus(struct pci_bus *b);
1204 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
1205 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
1206 void pci_stop_root_bus(struct pci_bus *bus);
1207 void pci_remove_root_bus(struct pci_bus *bus);
1208 void pci_setup_cardbus(struct pci_bus *bus);
1209 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
1210 void pci_sort_breadthfirst(void);
1211 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
1212 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
1213
1214 /* Generic PCI functions exported to card drivers */
1215
1216 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1217 u8 pci_find_capability(struct pci_dev *dev, int cap);
1218 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
1219 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
1220 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
1221 u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
1222 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
1223 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
1224 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
1225 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
1226
1227 u64 pci_get_dsn(struct pci_dev *dev);
1228
1229 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
1230 struct pci_dev *from);
1231 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
1232 unsigned int ss_vendor, unsigned int ss_device,
1233 struct pci_dev *from);
1234 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
1235 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
1236 unsigned int devfn);
1237 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
1238 struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
1239
1240 int pci_dev_present(const struct pci_device_id *ids);
1241
1242 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
1243 int where, u8 *val);
1244 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
1245 int where, u16 *val);
1246 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
1247 int where, u32 *val);
1248 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
1249 int where, u8 val);
1250 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
1251 int where, u16 val);
1252 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
1253 int where, u32 val);
1254
1255 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
1256 int where, int size, u32 *val);
1257 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
1258 int where, int size, u32 val);
1259 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
1260 int where, int size, u32 *val);
1261 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
1262 int where, int size, u32 val);
1263
1264 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
1265
1266 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
1267 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
1268 int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
1269 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1270 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
1271 int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
1272 void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
1273 u32 clear, u32 set);
1274
1275 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
1276 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
1277 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1278 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
1279 int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
1280 u16 clear, u16 set);
1281 int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
1282 u16 clear, u16 set);
1283 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
1284 u32 clear, u32 set);
1285
/**
 * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
 * @dev:	PCI device structure of the PCI Express device
 * @pos:	PCI Express Capability Register
 * @clear:	Clear bitmask
 * @set:	Set bitmask
 *
 * Perform a Read-Modify-Write (RMW) operation using @clear and @set
 * bitmasks on PCI Express Capability Register at @pos.  Certain PCI Express
 * Capability Registers are accessed concurrently in RMW fashion, hence
 * require locking which is handled transparently to the caller.
 */
static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
						      int pos,
						      u16 clear, u16 set)
{
	switch (pos) {
	case PCI_EXP_LNKCTL:
	case PCI_EXP_RTCTL:
		return pcie_capability_clear_and_set_word_locked(dev, pos,
								 clear, set);
	default:
		return pcie_capability_clear_and_set_word_unlocked(dev, pos,
								   clear, set);
	}
}
1312
static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
					   u16 set)
{
	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
}

static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
					    u32 set)
{
	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
}

static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
					     u16 clear)
{
	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
}

static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
					      u32 clear)
{
	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
}
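/*
 * A small sketch of a typical RMW on the Link Control register, which is
 * one of the registers the locked variant above protects.  This clears
 * the ASPM Control field, i.e. disables ASPM L0s/L1 for @pdev.
 *
 *	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
 *					   PCI_EXP_LNKCTL_ASPMC, 0);
 *
 *	// Equivalently, for a pure clear:
 *	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC);
 */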
1336
1337 /* User-space driven config access */
1338 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1339 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1340 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1341 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1342 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1343 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1344
1345 int __must_check pci_enable_device(struct pci_dev *dev);
1346 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1347 int __must_check pci_reenable_device(struct pci_dev *);
1348 int __must_check pcim_enable_device(struct pci_dev *pdev);
1349 void pcim_pin_device(struct pci_dev *pdev);
1350
static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
{
	/*
	 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
	 * writable and no quirk has marked the feature broken.
	 */
	return !pdev->broken_intx_masking;
}

static inline int pci_is_enabled(struct pci_dev *pdev)
{
	return (atomic_read(&pdev->enable_cnt) > 0);
}

static inline int pci_is_managed(struct pci_dev *pdev)
{
	return pdev->is_managed;
}
1369
1370 void pci_disable_device(struct pci_dev *dev);
1371
1372 extern unsigned int pcibios_max_latency;
1373 void pci_set_master(struct pci_dev *dev);
1374 void pci_clear_master(struct pci_dev *dev);
1375
1376 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1377 int pci_set_cacheline_size(struct pci_dev *dev);
1378 int __must_check pci_set_mwi(struct pci_dev *dev);
1379 int __must_check pcim_set_mwi(struct pci_dev *dev);
1380 int pci_try_set_mwi(struct pci_dev *dev);
1381 void pci_clear_mwi(struct pci_dev *dev);
1382 void pci_disable_parity(struct pci_dev *dev);
1383 void pci_intx(struct pci_dev *dev, int enable);
1384 bool pci_check_and_mask_intx(struct pci_dev *dev);
1385 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1386 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1387 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1388 int pcix_get_max_mmrbc(struct pci_dev *dev);
1389 int pcix_get_mmrbc(struct pci_dev *dev);
1390 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1391 int pcie_get_readrq(struct pci_dev *dev);
1392 int pcie_set_readrq(struct pci_dev *dev, int rq);
1393 int pcie_get_mps(struct pci_dev *dev);
1394 int pcie_set_mps(struct pci_dev *dev, int mps);
1395 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1396 enum pci_bus_speed *speed,
1397 enum pcie_link_width *width);
1398 int pcie_link_speed_mbps(struct pci_dev *pdev);
1399 void pcie_print_link_status(struct pci_dev *dev);
1400 int pcie_reset_flr(struct pci_dev *dev, bool probe);
1401 int pcie_flr(struct pci_dev *dev);
1402 int __pci_reset_function_locked(struct pci_dev *dev);
1403 int pci_reset_function(struct pci_dev *dev);
1404 int pci_reset_function_locked(struct pci_dev *dev);
1405 int pci_try_reset_function(struct pci_dev *dev);
1406 int pci_probe_reset_slot(struct pci_slot *slot);
1407 int pci_probe_reset_bus(struct pci_bus *bus);
1408 int pci_reset_bus(struct pci_dev *dev);
1409 void pci_reset_secondary_bus(struct pci_dev *dev);
1410 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1411 void pci_update_resource(struct pci_dev *dev, int resno);
1412 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1413 int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
1414 void pci_release_resource(struct pci_dev *dev, int resno);
1415 static inline int pci_rebar_bytes_to_size(u64 bytes)
1416 {
1417 bytes = roundup_pow_of_two(bytes);
1418
1419 /* Return BAR size as defined in the resizable BAR specification */
1420 return max(ilog2(bytes), 20) - 20;
1421 }
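/*
 * Worked example for pci_rebar_bytes_to_size(): the encoding is
 * log2(bytes) - 20, i.e. relative to 1 MB, after rounding up to a power of
 * two and clamping at 1 MB:
 *
 * pci_rebar_bytes_to_size(SZ_1M) == 0
 * pci_rebar_bytes_to_size(256 * SZ_1M) == 8
 * pci_rebar_bytes_to_size(8ULL * SZ_1G) == 13
 */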
1422
1423 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
1424 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
1425 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1426 bool pci_device_is_present(struct pci_dev *pdev);
1427 void pci_ignore_hotplug(struct pci_dev *dev);
1428 struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
1429 int pci_status_get_and_clear_errors(struct pci_dev *pdev);
1430
1431 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1432 irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1433 const char *fmt, ...);
1434 void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1435
1436 /* ROM control related routines */
1437 int pci_enable_rom(struct pci_dev *pdev);
1438 void pci_disable_rom(struct pci_dev *pdev);
1439 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1440 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1441
1442 /* Power management related routines */
1443 int pci_save_state(struct pci_dev *dev);
1444 void pci_restore_state(struct pci_dev *dev);
1445 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1446 int pci_load_saved_state(struct pci_dev *dev,
1447 struct pci_saved_state *state);
1448 int pci_load_and_free_saved_state(struct pci_dev *dev,
1449 struct pci_saved_state **state);
1450 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
1451 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1452 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
1453 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1454 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1455 void pci_pme_active(struct pci_dev *dev, bool enable);
1456 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1457 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1458 int pci_prepare_to_sleep(struct pci_dev *dev);
1459 int pci_back_from_sleep(struct pci_dev *dev);
1460 bool pci_dev_run_wake(struct pci_dev *dev);
1461 void pci_d3cold_enable(struct pci_dev *dev);
1462 void pci_d3cold_disable(struct pci_dev *dev);
1463 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1464 void pci_resume_bus(struct pci_bus *bus);
1465 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
1466
1467 /* For use by arch with custom probe code */
1468 void set_pcie_port_type(struct pci_dev *pdev);
1469 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1470
1471 /* Functions for PCI Hotplug drivers to use */
1472 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
1473 unsigned int pci_rescan_bus(struct pci_bus *bus);
1474 void pci_lock_rescan_remove(void);
1475 void pci_unlock_rescan_remove(void);
1476
1477 /* Vital Product Data routines */
1478 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1479 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1480 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1481 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1482
1483 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1484 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1485 void pci_bus_assign_resources(const struct pci_bus *bus);
1486 void pci_bus_claim_resources(struct pci_bus *bus);
1487 void pci_bus_size_bridges(struct pci_bus *bus);
1488 int pci_claim_resource(struct pci_dev *, int);
1489 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1490 void pci_assign_unassigned_resources(void);
1491 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1492 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1493 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1494 int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
1495 int pci_enable_resources(struct pci_dev *, int mask);
1496 void pci_assign_irq(struct pci_dev *dev);
1497 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1498 #define HAVE_PCI_REQ_REGIONS 2
1499 int __must_check pci_request_regions(struct pci_dev *, const char *);
1500 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1501 void pci_release_regions(struct pci_dev *);
1502 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1503 void pci_release_region(struct pci_dev *, int);
1504 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1505 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1506 void pci_release_selected_regions(struct pci_dev *, int);
1507
1508 static inline __must_check struct resource *
1509 pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
1510 unsigned int len, const char *name)
1511 {
1512 return __request_region(&pdev->driver_exclusive_resource, offset, len,
1513 name, IORESOURCE_EXCLUSIVE);
1514 }
1515
1516 static inline void pci_release_config_region(struct pci_dev *pdev,
1517 unsigned int offset,
1518 unsigned int len)
1519 {
1520 __release_region(&pdev->driver_exclusive_resource, offset, len);
1521 }
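/*
 * Example (minimal sketch): a driver claiming exclusive ownership of a slice
 * of its own config space, e.g. a vendor-specific capability window. The
 * offset, length and name below are placeholders:
 *
 * struct resource *res;
 *
 * res = pci_request_config_region_exclusive(pdev, 0x100, 0x40, "foo-vsec");
 * if (!res)
 * 	return -EBUSY;
 * ...
 * pci_release_config_region(pdev, 0x100, 0x40);
 */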
1522
1523 /* drivers/pci/bus.c */
1524 void pci_add_resource(struct list_head *resources, struct resource *res);
1525 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1526 resource_size_t offset);
1527 void pci_free_resource_list(struct list_head *resources);
1528 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
1529 unsigned int flags);
1530 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1531 void pci_bus_remove_resources(struct pci_bus *bus);
1532 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
1533 int devm_request_pci_bus_resources(struct device *dev,
1534 struct list_head *resources);
1535
1536 /* Temporary until new and working PCI SBR API in place */
1537 int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1538
1539 #define __pci_bus_for_each_res0(bus, res, ...) \
1540 for (unsigned int __b = 0; \
1541 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1542 __b++)
1543
1544 #define __pci_bus_for_each_res1(bus, res, __b) \
1545 for (__b = 0; \
1546 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1547 __b++)
1548
1549 /**
1550 * pci_bus_for_each_resource - iterate over PCI bus resources
1551 * @bus: the PCI bus
1552 * @res: pointer to the current resource
1553 * @...: optional index of the current resource
1554 *
1555 * Iterate over PCI bus resources. The iteration first walks the PCI bus
1556 * resource array, which has at most %PCI_BRIDGE_RESOURCE_NUM entries, and
1557 * then continues with the separate list of additional resources, if that
1558 * list is not empty. That is why the logical OR is used in the loop condition.
1559 *
1560 * Possible usage:
1561 *
1562 * struct pci_bus *bus = ...;
1563 * struct resource *res;
1564 * unsigned int i;
1565 *
1566 * // With optional index
1567 * pci_bus_for_each_resource(bus, res, i)
1568 * pr_info("PCI bus resource[%u]: %pR\n", i, res);
1569 *
1570 * // Without index
1571 * pci_bus_for_each_resource(bus, res)
1572 * _do_something_(res);
1573 */
1574 #define pci_bus_for_each_resource(bus, res, ...) \
1575 CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
1576 (bus, res, __VA_ARGS__)
1577
1578 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1579 struct resource *res, resource_size_t size,
1580 resource_size_t align, resource_size_t min,
1581 unsigned long type_mask,
1582 resource_alignf alignf,
1583 void *alignf_data);
1584
1585
1586 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
1587 resource_size_t size);
1588 unsigned long pci_address_to_pio(phys_addr_t addr);
1589 phys_addr_t pci_pio_to_address(unsigned long pio);
1590 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1591 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1592 phys_addr_t phys_addr);
1593 void pci_unmap_iospace(struct resource *res);
1594 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1595 resource_size_t offset,
1596 resource_size_t size);
1597 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1598 struct resource *res);
1599
1600 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1601 {
1602 struct pci_bus_region region;
1603
1604 pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
1605 return region.start;
1606 }
1607
1608 /* Proper probing supporting hot-pluggable devices */
1609 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1610 const char *mod_name);
1611
1612 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1613 #define pci_register_driver(driver) \
1614 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1615
1616 void pci_unregister_driver(struct pci_driver *dev);
1617
1618 /**
1619 * module_pci_driver() - Helper macro for registering a PCI driver
1620 * @__pci_driver: pci_driver struct
1621 *
1622 * Helper macro for PCI drivers which do not do anything special in module
1623 * init/exit. This eliminates a lot of boilerplate. Each module may only
1624 * use this macro once, and calling it replaces module_init() and module_exit()
1625 */
1626 #define module_pci_driver(__pci_driver) \
1627 module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
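/*
 * Example (sketch) of a trivial driver built around module_pci_driver(); all
 * "foo" identifiers and the vendor/device IDs are placeholders:
 *
 * static const struct pci_device_id foo_ids[] = {
 * 	{ PCI_DEVICE(0x1234, 0x5678) },
 * 	{ }
 * };
 * MODULE_DEVICE_TABLE(pci, foo_ids);
 *
 * static struct pci_driver foo_driver = {
 * 	.name = "foo",
 * 	.id_table = foo_ids,
 * 	.probe = foo_probe,
 * 	.remove = foo_remove,
 * };
 * module_pci_driver(foo_driver);
 */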
1628
1629 /**
1630 * builtin_pci_driver() - Helper macro for registering a PCI driver
1631 * @__pci_driver: pci_driver struct
1632 *
1633 * Helper macro for PCI drivers which do not do anything special in their
1634 * init code. This eliminates a lot of boilerplate. Each driver may only
1635 * use this macro once, and calling it replaces device_initcall(...)
1636 */
1637 #define builtin_pci_driver(__pci_driver) \
1638 builtin_driver(__pci_driver, pci_register_driver)
1639
1640 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1641 int pci_add_dynid(struct pci_driver *drv,
1642 unsigned int vendor, unsigned int device,
1643 unsigned int subvendor, unsigned int subdevice,
1644 unsigned int class, unsigned int class_mask,
1645 unsigned long driver_data);
1646 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1647 struct pci_dev *dev);
1648 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1649 int pass);
1650
1651 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1652 void *userdata);
1653 void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1654 void *userdata);
1655 int pci_cfg_space_size(struct pci_dev *dev);
1656 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1657 void pci_setup_bridge(struct pci_bus *bus);
1658 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1659 unsigned long type);
1660
1661 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1662 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1663
1664 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1665 unsigned int command_bits, u32 flags);
1666
1667 /*
1668 * Virtual interrupts allow more interrupt vectors to be allocated
1669 * than the device itself supports. They are not programmed into the
1670 * device's MSI-X table and must be handled by the driver through some
1671 * other means.
1672 */
1673 #define PCI_IRQ_VIRTUAL (1 << 4)
1674
1675 #define PCI_IRQ_ALL_TYPES (PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1676
1677 #include <linux/dmapool.h>
1678
1679 struct msix_entry {
1680 u32 vector; /* Kernel uses to write allocated vector */
1681 u16 entry; /* Driver uses to specify entry, OS writes */
1682 };
1683
1684 #ifdef CONFIG_PCI_MSI
1685 int pci_msi_vec_count(struct pci_dev *dev);
1686 void pci_disable_msi(struct pci_dev *dev);
1687 int pci_msix_vec_count(struct pci_dev *dev);
1688 void pci_disable_msix(struct pci_dev *dev);
1689 void pci_restore_msi_state(struct pci_dev *dev);
1690 int pci_msi_enabled(void);
1691 int pci_enable_msi(struct pci_dev *dev);
1692 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1693 int minvec, int maxvec);
1694 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1695 struct msix_entry *entries, int nvec)
1696 {
1697 int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1698 if (rc < 0)
1699 return rc;
1700 return 0;
1701 }
1702 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1703 unsigned int max_vecs, unsigned int flags);
1704 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1705 unsigned int max_vecs, unsigned int flags,
1706 struct irq_affinity *affd);
1707
1708 bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
1709 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1710 const struct irq_affinity_desc *affdesc);
1711 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
1712
1713 void pci_free_irq_vectors(struct pci_dev *dev);
1714 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1715 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1716
1717 #else
1718 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1719 static inline void pci_disable_msi(struct pci_dev *dev) { }
1720 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1721 static inline void pci_disable_msix(struct pci_dev *dev) { }
1722 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
1723 static inline int pci_msi_enabled(void) { return 0; }
1724 static inline int pci_enable_msi(struct pci_dev *dev)
1725 { return -ENOSYS; }
1726 static inline int pci_enable_msix_range(struct pci_dev *dev,
1727 struct msix_entry *entries, int minvec, int maxvec)
1728 { return -ENOSYS; }
1729 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1730 struct msix_entry *entries, int nvec)
1731 { return -ENOSYS; }
1732
1733 static inline int
1734 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1735 unsigned int max_vecs, unsigned int flags,
1736 struct irq_affinity *aff_desc)
1737 {
1738 if ((flags & PCI_IRQ_INTX) && min_vecs == 1 && dev->irq)
1739 return 1;
1740 return -ENOSPC;
1741 }
1742 static inline int
1743 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1744 unsigned int max_vecs, unsigned int flags)
1745 {
1746 return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
1747 flags, NULL);
1748 }
1749
1750 static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
1751 { return false; }
1752 static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1753 const struct irq_affinity_desc *affdesc)
1754 {
1755 struct msi_map map = { .index = -ENOSYS, };
1756
1757 return map;
1758 }
1759
1760 static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
1761 {
1762 }
1763
1764 static inline void pci_free_irq_vectors(struct pci_dev *dev)
1765 {
1766 }
1767
1768 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1769 {
1770 if (WARN_ON_ONCE(nr > 0))
1771 return -EINVAL;
1772 return dev->irq;
1773 }
1774 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
1775 int vec)
1776 {
1777 return cpu_possible_mask;
1778 }
1779 #endif
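/*
 * Typical vector allocation sequence (sketch, error paths trimmed): ask for
 * between 1 and 8 vectors of any supported type, then look up the Linux IRQ
 * number of vector 0 and release everything on teardown:
 *
 * nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
 * if (nvec < 0)
 * 	return nvec;
 * irq = pci_irq_vector(pdev, 0);
 * ...
 * pci_free_irq_vectors(pdev);
 */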
1780
1781 /**
1782 * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1783 * @d: the INTx IRQ domain
1784 * @node: the DT node for the device whose interrupt we're translating
1785 * @intspec: the interrupt specifier data from the DT
1786 * @intsize: the number of entries in @intspec
1787 * @out_hwirq: pointer at which to write the hwirq number
1788 * @out_type: pointer at which to write the interrupt type
1789 *
1790 * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1791 * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1792 * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
1793 * INTx value to obtain the hwirq number.
1794 *
1795 * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1796 */
1797 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1798 struct device_node *node,
1799 const u32 *intspec,
1800 unsigned int intsize,
1801 unsigned long *out_hwirq,
1802 unsigned int *out_type)
1803 {
1804 const u32 intx = intspec[0];
1805
1806 if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1807 return -EINVAL;
1808
1809 *out_hwirq = intx - PCI_INTERRUPT_INTA;
1810 return 0;
1811 }
1812
1813 #ifdef CONFIG_PCIEPORTBUS
1814 extern bool pcie_ports_disabled;
1815 extern bool pcie_ports_native;
1816 #else
1817 #define pcie_ports_disabled true
1818 #define pcie_ports_native false
1819 #endif
1820
1821 #define PCIE_LINK_STATE_L0S (BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */
1822 #define PCIE_LINK_STATE_L1 BIT(2) /* L1 state */
1823 #define PCIE_LINK_STATE_L1_1 BIT(3) /* ASPM L1.1 state */
1824 #define PCIE_LINK_STATE_L1_2 BIT(4) /* ASPM L1.2 state */
1825 #define PCIE_LINK_STATE_L1_1_PCIPM BIT(5) /* PCI-PM L1.1 state */
1826 #define PCIE_LINK_STATE_L1_2_PCIPM BIT(6) /* PCI-PM L1.2 state */
1827 #define PCIE_LINK_STATE_ASPM_ALL (PCIE_LINK_STATE_L0S |\
1828 PCIE_LINK_STATE_L1 |\
1829 PCIE_LINK_STATE_L1_1 |\
1830 PCIE_LINK_STATE_L1_2 |\
1831 PCIE_LINK_STATE_L1_1_PCIPM |\
1832 PCIE_LINK_STATE_L1_2_PCIPM)
1833 #define PCIE_LINK_STATE_CLKPM BIT(7)
1834 #define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_ASPM_ALL |\
1835 PCIE_LINK_STATE_CLKPM)
1836
1837 #ifdef CONFIG_PCIEASPM
1838 int pci_disable_link_state(struct pci_dev *pdev, int state);
1839 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1840 int pci_enable_link_state(struct pci_dev *pdev, int state);
1841 int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
1842 void pcie_no_aspm(void);
1843 bool pcie_aspm_support_enabled(void);
1844 bool pcie_aspm_enabled(struct pci_dev *pdev);
1845 #else
1846 static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1847 { return 0; }
1848 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1849 { return 0; }
1850 static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
1851 { return 0; }
1852 static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1853 { return 0; }
1854 static inline void pcie_no_aspm(void) { }
1855 static inline bool pcie_aspm_support_enabled(void) { return false; }
1856 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1857 #endif
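/*
 * Example (sketch): a driver that cannot tolerate the exit latency of the L1
 * substates can ask the ASPM core to keep them disabled on its link:
 *
 * pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_1 | PCIE_LINK_STATE_L1_2);
 *
 * With CONFIG_PCIEASPM=n the stubs above turn this into a successful no-op.
 */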
1858
1859 #ifdef CONFIG_PCIEAER
1860 bool pci_aer_available(void);
1861 #else
1862 static inline bool pci_aer_available(void) { return false; }
1863 #endif
1864
1865 bool pci_ats_disabled(void);
1866
1867 #ifdef CONFIG_PCIE_PTM
1868 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1869 void pci_disable_ptm(struct pci_dev *dev);
1870 bool pcie_ptm_enabled(struct pci_dev *dev);
1871 #else
1872 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
1873 { return -EINVAL; }
1874 static inline void pci_disable_ptm(struct pci_dev *dev) { }
1875 static inline bool pcie_ptm_enabled(struct pci_dev *dev)
1876 { return false; }
1877 #endif
1878
1879 void pci_cfg_access_lock(struct pci_dev *dev);
1880 bool pci_cfg_access_trylock(struct pci_dev *dev);
1881 void pci_cfg_access_unlock(struct pci_dev *dev);
1882
1883 void pci_dev_lock(struct pci_dev *dev);
1884 int pci_dev_trylock(struct pci_dev *dev);
1885 void pci_dev_unlock(struct pci_dev *dev);
1886 DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
1887
1888 /*
1889 * PCI domain support. Sometimes called a PCI segment (e.g., by ACPI),
1890 * a PCI domain is defined to be a set of PCI buses which share
1891 * configuration space.
1892 */
1893 #ifdef CONFIG_PCI_DOMAINS
1894 extern int pci_domains_supported;
1895 #else
1896 enum { pci_domains_supported = 0 };
1897 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1898 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1899 #endif /* CONFIG_PCI_DOMAINS */
1900
1901 /*
1902 * Generic implementation for PCI domain support. If your
1903 * architecture does not need custom management of PCI
1904 * domains then this implementation will be used
1905 */
1906 #ifdef CONFIG_PCI_DOMAINS_GENERIC
1907 static inline int pci_domain_nr(struct pci_bus *bus)
1908 {
1909 return bus->domain_nr;
1910 }
1911 #ifdef CONFIG_ACPI
1912 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1913 #else
1914 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1915 { return 0; }
1916 #endif
1917 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1918 void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
1919 #endif
1920
1921 /* Some architectures require additional setup to direct VGA traffic */
1922 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1923 unsigned int command_bits, u32 flags);
1924 void pci_register_set_vga_state(arch_set_vga_state_t func);
1925
1926 static inline int
1927 pci_request_io_regions(struct pci_dev *pdev, const char *name)
1928 {
1929 return pci_request_selected_regions(pdev,
1930 pci_select_bars(pdev, IORESOURCE_IO), name);
1931 }
1932
1933 static inline void
1934 pci_release_io_regions(struct pci_dev *pdev)
1935 {
1936 return pci_release_selected_regions(pdev,
1937 pci_select_bars(pdev, IORESOURCE_IO));
1938 }
1939
1940 static inline int
1941 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
1942 {
1943 return pci_request_selected_regions(pdev,
1944 pci_select_bars(pdev, IORESOURCE_MEM), name);
1945 }
1946
1947 static inline void
1948 pci_release_mem_regions(struct pci_dev *pdev)
1949 {
1950 return pci_release_selected_regions(pdev,
1951 pci_select_bars(pdev, IORESOURCE_MEM));
1952 }
1953
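/*
 * Example probe-time sequence (sketch) combining the helpers above with the
 * managed enable call; "foo" is a placeholder name and error unwinding is
 * abbreviated:
 *
 * ret = pcim_enable_device(pdev);
 * if (ret)
 * 	return ret;
 * ret = pci_request_mem_regions(pdev, "foo");
 * if (ret)
 * 	return ret;
 * ...
 * pci_release_mem_regions(pdev);
 */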
1954 #else /* CONFIG_PCI is not enabled */
1955
1956 static inline void pci_set_flags(int flags) { }
1957 static inline void pci_add_flags(int flags) { }
1958 static inline void pci_clear_flags(int flags) { }
1959 static inline int pci_has_flag(int flag) { return 0; }
1960
1961 /*
1962 * If the system does not have PCI, clearly these return errors. Define
1963 * these as simple inline functions to avoid hair in drivers.
1964 */
1965 #define _PCI_NOP(o, s, t) \
1966 static inline int pci_##o##_config_##s(struct pci_dev *dev, \
1967 int where, t val) \
1968 { return PCIBIOS_FUNC_NOT_SUPPORTED; }
1969
1970 #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \
1971 _PCI_NOP(o, word, u16 x) \
1972 _PCI_NOP(o, dword, u32 x)
1973 _PCI_NOP_ALL(read, *)
1974 _PCI_NOP_ALL(write,)
1975
1976 static inline struct pci_dev *pci_get_device(unsigned int vendor,
1977 unsigned int device,
1978 struct pci_dev *from)
1979 { return NULL; }
1980
1981 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
1982 unsigned int device,
1983 unsigned int ss_vendor,
1984 unsigned int ss_device,
1985 struct pci_dev *from)
1986 { return NULL; }
1987
1988 static inline struct pci_dev *pci_get_class(unsigned int class,
1989 struct pci_dev *from)
1990 { return NULL; }
1991
1992 static inline struct pci_dev *pci_get_base_class(unsigned int class,
1993 struct pci_dev *from)
1994 { return NULL; }
1995
1996 static inline int pci_dev_present(const struct pci_device_id *ids)
1997 { return 0; }
1998
1999 #define no_pci_devices() (1)
2000 #define pci_dev_put(dev) do { } while (0)
2001
2002 static inline void pci_set_master(struct pci_dev *dev) { }
2003 static inline void pci_clear_master(struct pci_dev *dev) { }
2004 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
2005 static inline void pci_disable_device(struct pci_dev *dev) { }
2006 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
2007 static inline int pci_assign_resource(struct pci_dev *dev, int i)
2008 { return -EBUSY; }
2009 static inline int __must_check __pci_register_driver(struct pci_driver *drv,
2010 struct module *owner,
2011 const char *mod_name)
2012 { return 0; }
2013 static inline int pci_register_driver(struct pci_driver *drv)
2014 { return 0; }
2015 static inline void pci_unregister_driver(struct pci_driver *drv) { }
2016 static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
2017 { return 0; }
2018 static inline u8 pci_find_next_capability(struct pci_dev *dev, u8 post, int cap)
2019 { return 0; }
2020 static inline u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
2021 { return 0; }
2022
2023 static inline u64 pci_get_dsn(struct pci_dev *dev)
2024 { return 0; }
2025
2026 /* Power management related routines */
2027 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
2028 static inline void pci_restore_state(struct pci_dev *dev) { }
2029 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
2030 { return 0; }
2031 static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
2032 { return 0; }
2033 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2034 { return 0; }
2035 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
2036 pm_message_t state)
2037 { return PCI_D0; }
2038 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
2039 int enable)
2040 { return 0; }
2041
2042 static inline struct resource *pci_find_resource(struct pci_dev *dev,
2043 struct resource *res)
2044 { return NULL; }
2045 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
2046 { return -EIO; }
2047 static inline void pci_release_regions(struct pci_dev *dev) { }
2048
2049 static inline int pci_register_io_range(struct fwnode_handle *fwnode,
2050 phys_addr_t addr, resource_size_t size)
2051 { return -EINVAL; }
2052
2053 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
2054
2055 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
2056 { return NULL; }
2057 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
2058 unsigned int devfn)
2059 { return NULL; }
2060 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
2061 unsigned int bus, unsigned int devfn)
2062 { return NULL; }
2063
2064 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
2065 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
2066
2067 #define dev_is_pci(d) (false)
2068 #define dev_is_pf(d) (false)
2069 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2070 { return false; }
2071 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
2072 struct device_node *node,
2073 const u32 *intspec,
2074 unsigned int intsize,
2075 unsigned long *out_hwirq,
2076 unsigned int *out_type)
2077 { return -EINVAL; }
2078
2079 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
2080 struct pci_dev *dev)
2081 { return NULL; }
2082 static inline bool pci_ats_disabled(void) { return true; }
2083
2084 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
2085 {
2086 return -EINVAL;
2087 }
2088
2089 static inline int
2090 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
2091 unsigned int max_vecs, unsigned int flags,
2092 struct irq_affinity *aff_desc)
2093 {
2094 return -ENOSPC;
2095 }
2096 static inline int
2097 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
2098 unsigned int max_vecs, unsigned int flags)
2099 {
2100 return -ENOSPC;
2101 }
2102 #endif /* CONFIG_PCI */
2103
2104 /* Include architecture-dependent settings and functions */
2105
2106 #include <asm/pci.h>
2107
2108 /*
2109 * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
2110 * is expected to be an offset within that region.
2111 *
2112 */
2113 int pci_mmap_resource_range(struct pci_dev *dev, int bar,
2114 struct vm_area_struct *vma,
2115 enum pci_mmap_state mmap_state, int write_combine);
2116
2117 #ifndef arch_can_pci_mmap_wc
2118 #define arch_can_pci_mmap_wc() 0
2119 #endif
2120
2121 #ifndef arch_can_pci_mmap_io
2122 #define arch_can_pci_mmap_io() 0
2123 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
2124 #else
2125 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
2126 #endif
2127
2128 #ifndef pci_root_bus_fwnode
2129 #define pci_root_bus_fwnode(bus) NULL
2130 #endif
2131
2132 /*
2133 * These helpers provide future and backwards compatibility
2134 * for accessing popular PCI BAR info
2135 */
2136 #define pci_resource_n(dev, bar) (&(dev)->resource[(bar)])
2137 #define pci_resource_start(dev, bar) (pci_resource_n(dev, bar)->start)
2138 #define pci_resource_end(dev, bar) (pci_resource_n(dev, bar)->end)
2139 #define pci_resource_flags(dev, bar) (pci_resource_n(dev, bar)->flags)
2140 #define pci_resource_len(dev,bar) \
2141 (pci_resource_end((dev), (bar)) ? \
2142 resource_size(pci_resource_n((dev), (bar))) : 0)
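/*
 * Example (sketch): sanity-check and map BAR 0 using the accessors above.
 * Real drivers typically prefer the managed pcim_iomap()/pci_ioremap_bar()
 * helpers, but those are built on the same accessors:
 *
 * if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
 * 	return -ENODEV;
 * regs = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
 * if (!regs)
 * 	return -ENOMEM;
 */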
2143
2144 #define __pci_dev_for_each_res0(dev, res, ...) \
2145 for (unsigned int __b = 0; \
2146 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2147 __b++)
2148
2149 #define __pci_dev_for_each_res1(dev, res, __b) \
2150 for (__b = 0; \
2151 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2152 __b++)
2153
2154 #define pci_dev_for_each_resource(dev, res, ...) \
2155 CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
2156 (dev, res, __VA_ARGS__)
2157
2158 /*
2159 * Similar to the helpers above, these manipulate per-pci_dev
2160 * driver-specific data. They are really just a wrapper around
2161 * the generic device structure functions of these calls.
2162 */
2163 static inline void *pci_get_drvdata(struct pci_dev *pdev)
2164 {
2165 return dev_get_drvdata(&pdev->dev);
2166 }
2167
2168 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
2169 {
2170 dev_set_drvdata(&pdev->dev, data);
2171 }
2172
2173 static inline const char *pci_name(const struct pci_dev *pdev)
2174 {
2175 return dev_name(&pdev->dev);
2176 }
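/*
 * Example (sketch): stash the driver's private state at probe time and fetch
 * it back in later callbacks ("foo" names are placeholders):
 *
 * pci_set_drvdata(pdev, foo);			// in foo_probe()
 * ...
 * struct foo *foo = pci_get_drvdata(pdev);	// in foo_remove() etc.
 */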
2177
2178 void pci_resource_to_user(const struct pci_dev *dev, int bar,
2179 const struct resource *rsrc,
2180 resource_size_t *start, resource_size_t *end);
2181
2182 /*
2183 * The world is not perfect and supplies us with broken PCI devices.
2184 * For at least a part of these bugs we need a work-around, so both
2185 * generic (drivers/pci/quirks.c) and per-architecture code can define
2186 * fixup hooks to be called for particular buggy devices.
2187 */
2188
2189 struct pci_fixup {
2190 u16 vendor; /* Or PCI_ANY_ID */
2191 u16 device; /* Or PCI_ANY_ID */
2192 u32 class; /* Or PCI_ANY_ID */
2193 unsigned int class_shift; /* should be 0, 8, 16 */
2194 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2195 int hook_offset;
2196 #else
2197 void (*hook)(struct pci_dev *dev);
2198 #endif
2199 };
2200
2201 enum pci_fixup_pass {
2202 pci_fixup_early, /* Before probing BARs */
2203 pci_fixup_header, /* After reading configuration header */
2204 pci_fixup_final, /* Final phase of device fixups */
2205 pci_fixup_enable, /* pci_enable_device() time */
2206 pci_fixup_resume, /* pci_device_resume() */
2207 pci_fixup_suspend, /* pci_device_suspend() */
2208 pci_fixup_resume_early, /* pci_device_resume_early() */
2209 pci_fixup_suspend_late, /* pci_device_suspend_late() */
2210 };
2211
2212 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2213 #define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2214 class_shift, hook) \
2215 __ADDRESSABLE(hook) \
2216 asm(".section " #sec ", \"a\" \n" \
2217 ".balign 16 \n" \
2218 ".short " #vendor ", " #device " \n" \
2219 ".long " #class ", " #class_shift " \n" \
2220 ".long " #hook " - . \n" \
2221 ".previous \n");
2222
2223 /*
2224 * Clang's LTO may rename static functions in C, but has no way to
2225 * handle such renamings when referenced from inline asm. To work
2226 * around this, create global C stubs for these cases.
2227 */
2228 #ifdef CONFIG_LTO_CLANG
2229 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2230 class_shift, hook, stub) \
2231 void stub(struct pci_dev *dev); \
2232 void stub(struct pci_dev *dev) \
2233 { \
2234 hook(dev); \
2235 } \
2236 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2237 class_shift, stub)
2238 #else
2239 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2240 class_shift, hook, stub) \
2241 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2242 class_shift, hook)
2243 #endif
2244
2245 #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2246 class_shift, hook) \
2247 __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2248 class_shift, hook, __UNIQUE_ID(hook))
2249 #else
2250 /* Anonymous variables would be nice... */
2251 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
2252 class_shift, hook) \
2253 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
2254 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
2255 = { vendor, device, class, class_shift, hook };
2256 #endif
2257
2258 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
2259 class_shift, hook) \
2260 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
2261 hook, vendor, device, class, class_shift, hook)
2262 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
2263 class_shift, hook) \
2264 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
2265 hook, vendor, device, class, class_shift, hook)
2266 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
2267 class_shift, hook) \
2268 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
2269 hook, vendor, device, class, class_shift, hook)
2270 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
2271 class_shift, hook) \
2272 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
2273 hook, vendor, device, class, class_shift, hook)
2274 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
2275 class_shift, hook) \
2276 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
2277 resume##hook, vendor, device, class, class_shift, hook)
2278 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
2279 class_shift, hook) \
2280 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
2281 resume_early##hook, vendor, device, class, class_shift, hook)
2282 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
2283 class_shift, hook) \
2284 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
2285 suspend##hook, vendor, device, class, class_shift, hook)
2286 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
2287 class_shift, hook) \
2288 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
2289 suspend_late##hook, vendor, device, class, class_shift, hook)
2290
2291 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
2292 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
2293 hook, vendor, device, PCI_ANY_ID, 0, hook)
2294 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
2295 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
2296 hook, vendor, device, PCI_ANY_ID, 0, hook)
2297 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
2298 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
2299 hook, vendor, device, PCI_ANY_ID, 0, hook)
2300 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
2301 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
2302 hook, vendor, device, PCI_ANY_ID, 0, hook)
2303 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
2304 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
2305 resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
2306 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
2307 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
2308 resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
2309 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
2310 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
2311 suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
2312 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
2313 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
2314 suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
2315
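/*
 * Example (sketch): declaring a final-phase quirk for a single device; the
 * device ID and the hook body are placeholders:
 *
 * static void quirk_foo(struct pci_dev *dev)
 * {
 * 	pci_info(dev, "applying foo quirk\n");
 * }
 * DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1234, quirk_foo);
 */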
2316 #ifdef CONFIG_PCI_QUIRKS
2317 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
2318 #else
2319 static inline void pci_fixup_device(enum pci_fixup_pass pass,
2320 struct pci_dev *dev) { }
2321 #endif
2322
2323 int pcim_intx(struct pci_dev *pdev, int enabled);
2324 int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
2325 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
2326 void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
2327 const char *name);
2328 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
2329 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
2330 int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
2331 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
2332 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
2333 const char *name);
2334 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
2335 void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
2336 unsigned long offset, unsigned long len);
2337
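/*
 * Example (sketch): the managed helpers above let a probe routine request and
 * map a BAR without explicit cleanup ("foo" is a placeholder name):
 *
 * void __iomem *regs;
 *
 * regs = pcim_iomap_region(pdev, 0, "foo");
 * if (IS_ERR(regs))
 * 	return PTR_ERR(regs);
 */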
2338 extern int pci_pci_problems;
2339 #define PCIPCI_FAIL 1 /* No PCI PCI DMA */
2340 #define PCIPCI_TRITON 2
2341 #define PCIPCI_NATOMA 4
2342 #define PCIPCI_VIAETBF 8
2343 #define PCIPCI_VSFX 16
2344 #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
2345 #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
2346
2347 extern unsigned long pci_cardbus_io_size;
2348 extern unsigned long pci_cardbus_mem_size;
2349 extern u8 pci_dfl_cache_line_size;
2350 extern u8 pci_cache_line_size;
2351
2352 /* Architecture-specific versions may override these (weak) */
2353 void pcibios_disable_device(struct pci_dev *dev);
2354 void pcibios_set_master(struct pci_dev *dev);
2355 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2356 enum pcie_reset_state state);
2357 int pcibios_device_add(struct pci_dev *dev);
2358 void pcibios_release_device(struct pci_dev *dev);
2359 #ifdef CONFIG_PCI
2360 void pcibios_penalize_isa_irq(int irq, int active);
2361 #else
2362 static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2363 #endif
2364 int pcibios_alloc_irq(struct pci_dev *dev);
2365 void pcibios_free_irq(struct pci_dev *dev);
2366 resource_size_t pcibios_default_alignment(void);
2367
2368 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
2369 extern int pci_create_resource_files(struct pci_dev *dev);
2370 extern void pci_remove_resource_files(struct pci_dev *dev);
2371 #endif
2372
2373 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
2374 void __init pci_mmcfg_early_init(void);
2375 void __init pci_mmcfg_late_init(void);
2376 #else
2377 static inline void pci_mmcfg_early_init(void) { }
2378 static inline void pci_mmcfg_late_init(void) { }
2379 #endif
2380
2381 int pci_ext_cfg_avail(void);
2382
2383 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2384 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2385
2386 #ifdef CONFIG_PCI_IOV
2387 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2388 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2389 int pci_iov_vf_id(struct pci_dev *dev);
2390 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
2391 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2392 void pci_disable_sriov(struct pci_dev *dev);
2393
2394 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
2395 int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2396 void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2397 int pci_num_vf(struct pci_dev *dev);
2398 int pci_vfs_assigned(struct pci_dev *dev);
2399 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2400 int pci_sriov_get_totalvfs(struct pci_dev *dev);
2401 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2402 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2403 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2404
2405 /* Arch may override these (weak) */
2406 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2407 int pcibios_sriov_disable(struct pci_dev *pdev);
2408 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2409 #else
2410 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
2411 {
2412 return -ENOSYS;
2413 }
2414 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
2415 {
2416 return -ENOSYS;
2417 }
2418
2419 static inline int pci_iov_vf_id(struct pci_dev *dev)
2420 {
2421 return -ENOSYS;
2422 }
2423
2424 static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
2425 struct pci_driver *pf_driver)
2426 {
2427 return ERR_PTR(-EINVAL);
2428 }
2429
2430 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
2431 { return -ENODEV; }
2432
2433 static inline int pci_iov_sysfs_link(struct pci_dev *dev,
2434 struct pci_dev *virtfn, int id)
2435 {
2436 return -ENODEV;
2437 }
2438 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2439 {
2440 return -ENOSYS;
2441 }
2442 static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
2443 int id) { }
2444 static inline void pci_disable_sriov(struct pci_dev *dev) { }
2445 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
2446 static inline int pci_vfs_assigned(struct pci_dev *dev)
2447 { return 0; }
2448 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
2449 { return 0; }
2450 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
2451 { return 0; }
2452 #define pci_sriov_configure_simple NULL
2453 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
2454 { return 0; }
2455 static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2456 #endif
2457
2458 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
2459 void pci_hp_create_module_link(struct pci_slot *pci_slot);
2460 void pci_hp_remove_module_link(struct pci_slot *pci_slot);
2461 #endif
2462
2463 /**
2464 * pci_pcie_cap - get the saved PCIe capability offset
2465 * @dev: PCI device
2466 *
2467 * The PCIe capability offset is calculated at PCI device initialization
2468 * time and saved in the data structure. This function returns the saved
2469 * PCIe capability offset. Using it instead of pci_find_capability()
2470 * avoids an unnecessary search of the PCI configuration space. If you
2471 * need to recompute the PCIe capability offset from the raw device for
2472 * some reason, use pci_find_capability() instead.
2473 */
2474 static inline int pci_pcie_cap(struct pci_dev *dev)
2475 {
2476 return dev->pcie_cap;
2477 }
2478
2479 /**
2480 * pci_is_pcie - check if the PCI device is PCI Express capable
2481 * @dev: PCI device
2482 *
2483 * Returns: true if the PCI device is PCI Express capable, false otherwise.
2484 */
2485 static inline bool pci_is_pcie(struct pci_dev *dev)
2486 {
2487 return pci_pcie_cap(dev);
2488 }
2489
2490 /**
2491 * pcie_caps_reg - get the PCIe Capabilities Register
2492 * @dev: PCI device
2493 */
2494 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
2495 {
2496 return dev->pcie_flags_reg;
2497 }
2498
2499 /**
2500 * pci_pcie_type - get the PCIe device/port type
2501 * @dev: PCI device
2502 */
2503 static inline int pci_pcie_type(const struct pci_dev *dev)
2504 {
2505 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
2506 }
2507
2508 /**
2509 * pcie_find_root_port - Get the PCIe root port device
2510 * @dev: PCI device
2511 *
2512 * Traverse up the parent chain and return the PCIe Root Port PCI Device
2513 * for a given PCI/PCIe Device.
2514 */
2515 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2516 {
2517 while (dev) {
2518 if (pci_is_pcie(dev) &&
2519 pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2520 return dev;
2521 dev = pci_upstream_bridge(dev);
2522 }
2523
2524 return NULL;
2525 }
2526
2527 static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
2528 {
2529 /*
2530 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
2531 * and read w/o common lock. READ_ONCE() ensures compiler cannot cache
2532 * the value (e.g. inside the loop in pci_dev_wait()).
2533 */
2534 return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
2535 }
2536
2537 void pci_request_acs(void);
2538 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
2539 bool pci_acs_path_enabled(struct pci_dev *start,
2540 struct pci_dev *end, u16 acs_flags);
2541 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
2542
2543 #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
2544 #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
2545
2546 /* Large Resource Data Type Tag Item Names */
2547 #define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
2548 #define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */
2549 #define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */
2550
2551 #define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
2552 #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
2553 #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
2554
2555 #define PCI_VPD_RO_KEYWORD_PARTNO "PN"
2556 #define PCI_VPD_RO_KEYWORD_SERIALNO "SN"
2557 #define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
2558 #define PCI_VPD_RO_KEYWORD_VENDOR0 "V0"
2559 #define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
2560
2561 /**
2562 * pci_vpd_alloc - Allocate buffer and read VPD into it
2563 * @dev: PCI device
2564 * @size: pointer to field where VPD length is returned
2565 *
2566 * Returns pointer to allocated buffer or an ERR_PTR in case of failure
2567 */
2568 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);
2569
2570 /**
2571 * pci_vpd_find_id_string - Locate id string in VPD
2572 * @buf: Pointer to buffered VPD data
2573 * @len: The length of the buffer area in which to search
2574 * @size: Pointer to field where length of id string is returned
2575 *
2576 * Returns the index of the id string or -ENOENT if not found.
2577 */
2578 int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);
2579
2580 /**
2581 * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
2582 * @buf: Pointer to buffered VPD data
2583 * @len: The length of the buffer area in which to search
2584 * @kw: The keyword to search for
2585 * @size: Pointer to field where length of found keyword data is returned
2586 *
2587 * Returns the index of the information field keyword data or -ENOENT if
2588 * not found.
2589 */
2590 int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
2591 const char *kw, unsigned int *size);
2592
2593 /**
2594 * pci_vpd_check_csum - Check VPD checksum
2595 * @buf: Pointer to buffered VPD data
2596 * @len: VPD size
2597 *
2598 * Returns 1 if VPD has no checksum, otherwise 0 or an errno
2599 */
2600 int pci_vpd_check_csum(const void *buf, unsigned int len);
2601
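/*
 * Example (sketch): read a device's VPD into a buffer and look up the serial
 * number keyword (error handling abbreviated):
 *
 * unsigned int vpd_len, kw_len;
 * void *vpd = pci_vpd_alloc(pdev, &vpd_len);
 * int off;
 *
 * if (IS_ERR(vpd))
 * 	return PTR_ERR(vpd);
 * off = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
 * 				      PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
 * if (off >= 0)
 * 	pci_info(pdev, "serial number at offset %d, %u bytes\n", off, kw_len);
 * kfree(vpd);
 */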
2602 /* PCI <-> OF binding helpers */
2603 #ifdef CONFIG_OF
2604 struct device_node;
2605 struct irq_domain;
2606 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2607 bool pci_host_of_has_msi_map(struct device *dev);
2608
2609 /* Arch may override this (weak) */
2610 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
2611
2612 #else /* CONFIG_OF */
2613 static inline struct irq_domain *
2614 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
2615 static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
2616 #endif /* CONFIG_OF */
2617
2618 static inline struct device_node *
2619 pci_device_to_OF_node(const struct pci_dev *pdev)
2620 {
2621 return pdev ? pdev->dev.of_node : NULL;
2622 }
2623
2624 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2625 {
2626 return bus ? bus->dev.of_node : NULL;
2627 }
2628
2629 #ifdef CONFIG_ACPI
2630 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
2631
2632 void
2633 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
2634 bool pci_pr3_present(struct pci_dev *pdev);
2635 #else
2636 static inline struct irq_domain *
2637 pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
2638 static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
2639 #endif
2640
2641 #if defined(CONFIG_X86) && defined(CONFIG_ACPI)
2642 bool arch_pci_dev_is_removable(struct pci_dev *pdev);
2643 #else
2644 static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; }
2645 #endif
2646
2647 #ifdef CONFIG_EEH
2648 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
2649 {
2650 return pdev->dev.archdata.edev;
2651 }
2652 #endif

void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned int nr_devfns);
bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
int pci_for_each_dma_alias(struct pci_dev *pdev,
			   int (*fn)(struct pci_dev *pdev,
				     u16 alias, void *data), void *data);
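
/*
 * Usage sketch (illustrative only): pci_for_each_dma_alias() calls @fn for
 * the device itself and then for each DMA alias; a non-zero return value
 * from @fn stops the walk and is passed back to the caller. The callback
 * name below is made up.
 *
 *	static int report_alias(struct pci_dev *pdev, u16 alias, void *data)
 *	{
 *		pci_info(pdev, "DMA alias %02x.%d\n",
 *			 PCI_SLOT(alias), PCI_FUNC(alias));
 *		return 0;
 *	}
 *
 *	...
 *	pci_for_each_dma_alias(pdev, report_alias, NULL);
 */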

/* Helper functions for operating on the PCI_DEV_FLAGS_ASSIGNED device flag */
static inline void pci_set_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
}
static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
}
static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
{
	return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
}
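
/*
 * Usage sketch (illustrative only): code that assigns a device to a guest
 * marks it before handing it over and clears the flag again on release, so
 * that other kernel code can test the flag first, e.g. before removing the
 * device.
 *
 *	pci_set_dev_assigned(pdev);
 *	... device is owned by the guest ...
 *	if (pci_is_dev_assigned(pdev))
 *		pci_clear_dev_assigned(pdev);
 */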

/**
 * pci_ari_enabled - query ARI forwarding status
 * @bus: the PCI bus
 *
 * Returns true if ARI forwarding is enabled.
 */
static inline bool pci_ari_enabled(struct pci_bus *bus)
{
	return bus->self && bus->self->ari_enabled;
}

/**
 * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
 * @pdev: PCI device to check
 *
 * Walk upwards from @pdev and check for each encountered bridge if it's part
 * of a Thunderbolt controller. Reaching the host bridge means @pdev is not
 * Thunderbolt-attached but rather built into the system (e.g. soldered to
 * the mainboard).
 */
static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev;

	if (pdev->is_thunderbolt)
		return true;

	while ((parent = pci_upstream_bridge(parent)))
		if (parent->is_thunderbolt)
			return true;

	return false;
}

#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif

#include <linux/dma-mapping.h>

#define pci_printk(level, pdev, fmt, arg...) \
	dev_printk(level, &(pdev)->dev, fmt, ##arg)

#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
#define pci_warn_once(pdev, fmt, arg...)	dev_warn_once(&(pdev)->dev, fmt, ##arg)
#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)

#define pci_notice_ratelimited(pdev, fmt, arg...) \
	dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)

#define pci_info_ratelimited(pdev, fmt, arg...) \
	dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)

#define pci_WARN(pdev, condition, fmt, arg...) \
	WARN(condition, "%s %s: " fmt, \
	     dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)

#define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
	WARN_ONCE(condition, "%s %s: " fmt, \
		  dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
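
/*
 * Usage sketch (illustrative only): these wrappers forward to the dev_*()
 * and WARN() helpers, prefixing messages with the driver name and the PCI
 * address of the device. The variables and messages below are made up.
 *
 *	pci_info(pdev, "enabling device\n");
 *	pci_err(pdev, "failed to map BAR %d\n", bar);
 *	pci_WARN_ONCE(pdev, len > max_len, "oversized request (%u)\n", len);
 */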

#endif /* LINUX_PCI_H */