1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * PCI Bus Services, see include/linux/pci.h for further explanation.
4 *
5 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
6 * David Mosberger-Tang
7 *
8 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
9 */
10
11 #include <linux/acpi.h>
12 #include <linux/kernel.h>
13 #include <linux/delay.h>
14 #include <linux/dmi.h>
15 #include <linux/init.h>
16 #include <linux/msi.h>
17 #include <linux/of.h>
18 #include <linux/pci.h>
19 #include <linux/pm.h>
20 #include <linux/slab.h>
21 #include <linux/module.h>
22 #include <linux/spinlock.h>
23 #include <linux/string.h>
24 #include <linux/log2.h>
25 #include <linux/logic_pio.h>
26 #include <linux/pm_wakeup.h>
27 #include <linux/interrupt.h>
28 #include <linux/device.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/pci_hotplug.h>
31 #include <linux/vmalloc.h>
32 #include <asm/dma.h>
33 #include <linux/aer.h>
34 #include <linux/bitfield.h>
35 #include "pci.h"
36
37 DEFINE_MUTEX(pci_slot_mutex);
38
39 const char *pci_power_names[] = {
40 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
41 };
42 EXPORT_SYMBOL_GPL(pci_power_names);
43
44 #ifdef CONFIG_X86_32
45 int isa_dma_bridge_buggy;
46 EXPORT_SYMBOL(isa_dma_bridge_buggy);
47 #endif
48
49 int pci_pci_problems;
50 EXPORT_SYMBOL(pci_pci_problems);
51
52 unsigned int pci_pm_d3hot_delay;
53
54 static void pci_pme_list_scan(struct work_struct *work);
55
56 static LIST_HEAD(pci_pme_list);
57 static DEFINE_MUTEX(pci_pme_list_mutex);
58 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
59
60 struct pci_pme_device {
61 struct list_head list;
62 struct pci_dev *dev;
63 };
64
65 #define PME_TIMEOUT 1000 /* How long between PME checks */
66
67 static void pci_dev_d3_sleep(struct pci_dev *dev)
68 {
69 unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
70 unsigned int upper;
71
72 if (delay_ms) {
73 /* Use a 20% upper bound, 1ms minimum */
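/* e.g. delay_ms == 10 would give usleep_range(10000, 12000) */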
74 upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
75 usleep_range(delay_ms * USEC_PER_MSEC,
76 (delay_ms + upper) * USEC_PER_MSEC);
77 }
78 }
79
80 bool pci_reset_supported(struct pci_dev *dev)
81 {
82 return dev->reset_methods[0] != 0;
83 }
84
85 #ifdef CONFIG_PCI_DOMAINS
86 int pci_domains_supported = 1;
87 #endif
88
89 #define DEFAULT_CARDBUS_IO_SIZE (256)
90 #define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
91 /* pci=cbmemsize=nnM,cbiosize=nn can override this */
92 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
93 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
94
95 #define DEFAULT_HOTPLUG_IO_SIZE (256)
96 #define DEFAULT_HOTPLUG_MMIO_SIZE (2*1024*1024)
97 #define DEFAULT_HOTPLUG_MMIO_PREF_SIZE (2*1024*1024)
98 /* hpiosize=nn can override this */
99 unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
100 /*
101 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
102 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
103 * pci=hpmemsize=nnM overrides both
104 */
105 unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
106 unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
107
108 #define DEFAULT_HOTPLUG_BUS_SIZE 1
109 unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
110
111
112 /* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
113 #ifdef CONFIG_PCIE_BUS_TUNE_OFF
114 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
115 #elif defined CONFIG_PCIE_BUS_SAFE
116 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
117 #elif defined CONFIG_PCIE_BUS_PERFORMANCE
118 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
119 #elif defined CONFIG_PCIE_BUS_PEER2PEER
120 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
121 #else
122 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
123 #endif
124
125 /*
126 * The default CLS is used if the arch didn't set CLS explicitly and not
127 * all PCI devices agree on the same value. The arch can override either
128 * the default or the actual value as it sees fit. Don't forget this is
129 * measured in 32-bit words, not bytes.
130 */
131 u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
132 u8 pci_cache_line_size;
133
134 /*
135 * If we set up a device for bus mastering, we need to check the latency
136 * timer as certain BIOSes forget to set it properly.
137 */
138 unsigned int pcibios_max_latency = 255;
139
140 /* If set, the PCIe ARI capability will not be used. */
141 static bool pcie_ari_disabled;
142
143 /* If set, the PCIe ATS capability will not be used. */
144 static bool pcie_ats_disabled;
145
146 /* If set, the PCI config space of each device is printed during boot. */
147 bool pci_early_dump;
148
149 bool pci_ats_disabled(void)
150 {
151 return pcie_ats_disabled;
152 }
153 EXPORT_SYMBOL_GPL(pci_ats_disabled);
154
155 /* Disable bridge_d3 for all PCIe ports */
156 static bool pci_bridge_d3_disable;
157 /* Force bridge_d3 for all PCIe ports */
158 static bool pci_bridge_d3_force;
159
160 static int __init pcie_port_pm_setup(char *str)
161 {
162 if (!strcmp(str, "off"))
163 pci_bridge_d3_disable = true;
164 else if (!strcmp(str, "force"))
165 pci_bridge_d3_force = true;
166 return 1;
167 }
168 __setup("pcie_port_pm=", pcie_port_pm_setup);
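/*
 * Illustration: booting with "pcie_port_pm=off" sets pci_bridge_d3_disable
 * and keeps PCIe ports out of D3, while "pcie_port_pm=force" sets
 * pci_bridge_d3_force instead.
 */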
169
170 /**
171 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
172 * @bus: pointer to PCI bus structure to search
173 *
174 * Given a PCI bus, returns the highest PCI bus number present in the set
175 * including the given PCI bus and its list of child PCI buses.
176 */
177 unsigned char pci_bus_max_busnr(struct pci_bus *bus)
178 {
179 struct pci_bus *tmp;
180 unsigned char max, n;
181
182 max = bus->busn_res.end;
183 list_for_each_entry(tmp, &bus->children, node) {
184 n = pci_bus_max_busnr(tmp);
185 if (n > max)
186 max = n;
187 }
188 return max;
189 }
190 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
191
192 /**
193 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
194 * @pdev: the PCI device
195 *
196 * Returns error bits set in PCI_STATUS and clears them.
197 */
198 int pci_status_get_and_clear_errors(struct pci_dev *pdev)
199 {
200 u16 status;
201 int ret;
202
203 ret = pci_read_config_word(pdev, PCI_STATUS, &status);
204 if (ret != PCIBIOS_SUCCESSFUL)
205 return -EIO;
206
207 status &= PCI_STATUS_ERROR_BITS;
208 if (status)
209 pci_write_config_word(pdev, PCI_STATUS, status);
210
211 return status;
212 }
213 EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
214
215 #ifdef CONFIG_HAS_IOMEM
216 static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
217 bool write_combine)
218 {
219 struct resource *res = &pdev->resource[bar];
220 resource_size_t start = res->start;
221 resource_size_t size = resource_size(res);
222
223 /*
224 * Make sure the BAR is actually a memory resource, not an IO resource
225 */
226 if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
227 pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
228 return NULL;
229 }
230
231 if (write_combine)
232 return ioremap_wc(start, size);
233
234 return ioremap(start, size);
235 }
236
237 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
238 {
239 return __pci_ioremap_resource(pdev, bar, false);
240 }
241 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
242
243 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
244 {
245 return __pci_ioremap_resource(pdev, bar, true);
246 }
247 EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
248 #endif
249
250 /**
251 * pci_dev_str_match_path - test if a path string matches a device
252 * @dev: the PCI device to test
253 * @path: string to match the device against
254 * @endptr: pointer to the string after the match
255 *
256 * Test if a string (typically from a kernel parameter) formatted as a
257 * path of device/function addresses matches a PCI device. The string must
258 * be of the form:
259 *
260 * [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
261 *
262 * A path for a device can be obtained using 'lspci -t'. Using a path
263 * is more robust against bus renumbering than using only a single bus,
264 * device and function address.
265 *
266 * Returns 1 if the string matches the device, 0 if it does not and
267 * a negative error code if it fails to parse the string.
268 */
269 static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
270 const char **endptr)
271 {
272 int ret;
273 unsigned int seg, bus, slot, func;
274 char *wpath, *p;
275 char end;
276
277 *endptr = strchrnul(path, ';');
278
279 wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
280 if (!wpath)
281 return -ENOMEM;
282
283 while (1) {
284 p = strrchr(wpath, '/');
285 if (!p)
286 break;
287 ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
288 if (ret != 2) {
289 ret = -EINVAL;
290 goto free_and_exit;
291 }
292
293 if (dev->devfn != PCI_DEVFN(slot, func)) {
294 ret = 0;
295 goto free_and_exit;
296 }
297
298 /*
299 * Note: we don't need to get a reference to the upstream
300 * bridge because we hold a reference to the top level
301 * device which should hold a reference to the bridge,
302 * and so on.
303 */
304 dev = pci_upstream_bridge(dev);
305 if (!dev) {
306 ret = 0;
307 goto free_and_exit;
308 }
309
310 *p = 0;
311 }
312
313 ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
314 &func, &end);
315 if (ret != 4) {
316 seg = 0;
317 ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
318 if (ret != 3) {
319 ret = -EINVAL;
320 goto free_and_exit;
321 }
322 }
323
324 ret = (seg == pci_domain_nr(dev->bus) &&
325 bus == dev->bus->number &&
326 dev->devfn == PCI_DEVFN(slot, func));
327
328 free_and_exit:
329 kfree(wpath);
330 return ret;
331 }
332
333 /**
334 * pci_dev_str_match - test if a string matches a device
335 * @dev: the PCI device to test
336 * @p: string to match the device against
337 * @endptr: pointer to the string after the match
338 *
339 * Test if a string (typically from a kernel parameter) matches a specified
340 * PCI device. The string may be of one of the following formats:
341 *
342 * [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
343 * pci:<vendor>:<device>[:<subvendor>:<subdevice>]
344 *
345 * The first format specifies a PCI bus/device/function address which
346 * may change if new hardware is inserted, if motherboard firmware changes,
347 * or due to changes caused by kernel parameters. If the domain is
348 * left unspecified, it is taken to be 0. In order to be robust against
349 * bus renumbering issues, a path of PCI device/function numbers may be used
350 * to address the specific device. The path for a device can be determined
351 * through the use of 'lspci -t'.
352 *
353 * The second format matches devices using IDs in the configuration
354 * space which may match multiple devices in the system. A value of 0
355 * for any field will match all devices. (Note: this differs from
356 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
357 * legacy reasons and convenience so users don't have to specify
358 * FFFFFFFFs on the command line.)
359 *
360 * Returns 1 if the string matches the device, 0 if it does not and
361 * a negative error code if the string cannot be parsed.
362 */
363 static int pci_dev_str_match(struct pci_dev *dev, const char *p,
364 const char **endptr)
365 {
366 int ret;
367 int count;
368 unsigned short vendor, device, subsystem_vendor, subsystem_device;
369
370 if (strncmp(p, "pci:", 4) == 0) {
371 /* PCI vendor/device (subvendor/subdevice) IDs are specified */
372 p += 4;
373 ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
374 &subsystem_vendor, &subsystem_device, &count);
375 if (ret != 4) {
376 ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
377 if (ret != 2)
378 return -EINVAL;
379
380 subsystem_vendor = 0;
381 subsystem_device = 0;
382 }
383
384 p += count;
385
386 if ((!vendor || vendor == dev->vendor) &&
387 (!device || device == dev->device) &&
388 (!subsystem_vendor ||
389 subsystem_vendor == dev->subsystem_vendor) &&
390 (!subsystem_device ||
391 subsystem_device == dev->subsystem_device))
392 goto found;
393 } else {
394 /*
395 * PCI Bus, Device, Function IDs are specified
396 * (optionally, may include a path of devfns following it)
397 */
398 ret = pci_dev_str_match_path(dev, p, &p);
399 if (ret < 0)
400 return ret;
401 else if (ret)
402 goto found;
403 }
404
405 *endptr = p;
406 return 0;
407
408 found:
409 *endptr = p;
410 return 1;
411 }
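/*
 * Illustration only (hypothetical addresses/IDs): "0000:03:00.0" uses the
 * first format above to name domain 0000, bus 03, device 00, function 0;
 * "0000:00:1c.4/00.0" additionally walks one devfn step below that bridge;
 * "pci:8086:1533" uses the second format to match any device with vendor
 * 0x8086 and device ID 0x1533, wherever it sits.
 */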
412
413 static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
414 u8 pos, int cap, int *ttl)
415 {
416 u8 id;
417 u16 ent;
418
419 pci_bus_read_config_byte(bus, devfn, pos, &pos);
420
421 while ((*ttl)--) {
422 if (pos < 0x40)
423 break;
424 pos &= ~3;
425 pci_bus_read_config_word(bus, devfn, pos, &ent);
426
427 id = ent & 0xff;
428 if (id == 0xff)
429 break;
430 if (id == cap)
431 return pos;
432 pos = (ent >> 8);
433 }
434 return 0;
435 }
436
437 static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
438 u8 pos, int cap)
439 {
440 int ttl = PCI_FIND_CAP_TTL;
441
442 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
443 }
444
445 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
446 {
447 return __pci_find_next_cap(dev->bus, dev->devfn,
448 pos + PCI_CAP_LIST_NEXT, cap);
449 }
450 EXPORT_SYMBOL_GPL(pci_find_next_capability);
451
452 static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
453 unsigned int devfn, u8 hdr_type)
454 {
455 u16 status;
456
457 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
458 if (!(status & PCI_STATUS_CAP_LIST))
459 return 0;
460
461 switch (hdr_type) {
462 case PCI_HEADER_TYPE_NORMAL:
463 case PCI_HEADER_TYPE_BRIDGE:
464 return PCI_CAPABILITY_LIST;
465 case PCI_HEADER_TYPE_CARDBUS:
466 return PCI_CB_CAPABILITY_LIST;
467 }
468
469 return 0;
470 }
471
472 /**
473 * pci_find_capability - query for devices' capabilities
474 * @dev: PCI device to query
475 * @cap: capability code
476 *
477 * Tell if a device supports a given PCI capability.
478 * Returns the address of the requested capability structure within the
479 * device's PCI configuration space or 0 in case the device does not
480 * support it. Possible values for @cap include:
481 *
482 * %PCI_CAP_ID_PM Power Management
483 * %PCI_CAP_ID_AGP Accelerated Graphics Port
484 * %PCI_CAP_ID_VPD Vital Product Data
485 * %PCI_CAP_ID_SLOTID Slot Identification
486 * %PCI_CAP_ID_MSI Message Signalled Interrupts
487 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
488 * %PCI_CAP_ID_PCIX PCI-X
489 * %PCI_CAP_ID_EXP PCI Express
490 */
491 u8 pci_find_capability(struct pci_dev *dev, int cap)
492 {
493 u8 pos;
494
495 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
496 if (pos)
497 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
498
499 return pos;
500 }
501 EXPORT_SYMBOL(pci_find_capability);
502
503 /**
504 * pci_bus_find_capability - query for devices' capabilities
505 * @bus: the PCI bus to query
506 * @devfn: PCI device to query
507 * @cap: capability code
508 *
509 * Like pci_find_capability() but works for PCI devices that do not have a
510 * pci_dev structure set up yet.
511 *
512 * Returns the address of the requested capability structure within the
513 * device's PCI configuration space or 0 in case the device does not
514 * support it.
515 */
516 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
517 {
518 u8 hdr_type, pos;
519
520 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
521
522 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
523 if (pos)
524 pos = __pci_find_next_cap(bus, devfn, pos, cap);
525
526 return pos;
527 }
528 EXPORT_SYMBOL(pci_bus_find_capability);
529
530 /**
531 * pci_find_next_ext_capability - Find an extended capability
532 * @dev: PCI device to query
533 * @start: address at which to start looking (0 to start at beginning of list)
534 * @cap: capability code
535 *
536 * Returns the address of the next matching extended capability structure
537 * within the device's PCI configuration space or 0 if the device does
538 * not support it. Some capabilities can occur several times, e.g., the
539 * vendor-specific capability, and this provides a way to find them all.
540 */
541 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
542 {
543 u32 header;
544 int ttl;
545 u16 pos = PCI_CFG_SPACE_SIZE;
546
547 /* minimum 8 bytes per capability */
548 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
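/* i.e. (4096 - 256) / 8 = 480 possible entries with 4K extended config space */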
549
550 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
551 return 0;
552
553 if (start)
554 pos = start;
555
556 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
557 return 0;
558
559 /*
560 * If we have no capabilities, this is indicated by cap ID,
561 * cap version and next pointer all being 0.
562 */
563 if (header == 0)
564 return 0;
565
566 while (ttl-- > 0) {
567 if (PCI_EXT_CAP_ID(header) == cap && pos != start)
568 return pos;
569
570 pos = PCI_EXT_CAP_NEXT(header);
571 if (pos < PCI_CFG_SPACE_SIZE)
572 break;
573
574 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
575 break;
576 }
577
578 return 0;
579 }
580 EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
581
582 /**
583 * pci_find_ext_capability - Find an extended capability
584 * @dev: PCI device to query
585 * @cap: capability code
586 *
587 * Returns the address of the requested extended capability structure
588 * within the device's PCI configuration space or 0 if the device does
589 * not support it. Possible values for @cap include:
590 *
591 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
592 * %PCI_EXT_CAP_ID_VC Virtual Channel
593 * %PCI_EXT_CAP_ID_DSN Device Serial Number
594 * %PCI_EXT_CAP_ID_PWR Power Budgeting
595 */
596 u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
597 {
598 return pci_find_next_ext_capability(dev, 0, cap);
599 }
600 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
601
602 /**
603 * pci_get_dsn - Read and return the 8-byte Device Serial Number
604 * @dev: PCI device to query
605 *
606 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
607 * Number.
608 *
609 * Returns the DSN, or zero if the capability does not exist.
610 */
611 u64 pci_get_dsn(struct pci_dev *dev)
612 {
613 u32 dword;
614 u64 dsn;
615 int pos;
616
617 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
618 if (!pos)
619 return 0;
620
621 /*
622 * The Device Serial Number is two dwords offset 4 bytes from the
623 * capability position. The specification says that the first dword is
624 * the lower half, and the second dword is the upper half.
625 */
626 pos += 4;
627 pci_read_config_dword(dev, pos, &dword);
628 dsn = (u64)dword;
629 pci_read_config_dword(dev, pos + 4, &dword);
630 dsn |= ((u64)dword) << 32;
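/*
 * Hypothetical example: a lower dword of 0x00123456 and an upper dword of
 * 0x00abcdef compose a DSN of 0x00abcdef00123456.
 */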
631
632 return dsn;
633 }
634 EXPORT_SYMBOL_GPL(pci_get_dsn);
635
636 static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
637 {
638 int rc, ttl = PCI_FIND_CAP_TTL;
639 u8 cap, mask;
640
641 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
642 mask = HT_3BIT_CAP_MASK;
643 else
644 mask = HT_5BIT_CAP_MASK;
645
646 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
647 PCI_CAP_ID_HT, &ttl);
648 while (pos) {
649 rc = pci_read_config_byte(dev, pos + 3, &cap);
650 if (rc != PCIBIOS_SUCCESSFUL)
651 return 0;
652
653 if ((cap & mask) == ht_cap)
654 return pos;
655
656 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
657 pos + PCI_CAP_LIST_NEXT,
658 PCI_CAP_ID_HT, &ttl);
659 }
660
661 return 0;
662 }
663
664 /**
665 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
666 * @dev: PCI device to query
667 * @pos: Position from which to continue searching
668 * @ht_cap: HyperTransport capability code
669 *
670 * To be used in conjunction with pci_find_ht_capability() to search for
671 * all capabilities matching @ht_cap. @pos should always be a value returned
672 * from pci_find_ht_capability().
673 *
674 * NB. To be 100% safe against broken PCI devices, the caller should take
675 * steps to avoid an infinite loop.
676 */
677 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
678 {
679 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
680 }
681 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
682
683 /**
684 * pci_find_ht_capability - query a device's HyperTransport capabilities
685 * @dev: PCI device to query
686 * @ht_cap: HyperTransport capability code
687 *
688 * Tell if a device supports a given HyperTransport capability.
689 * Returns an address within the device's PCI configuration space
690 * or 0 in case the device does not support the requested capability.
691 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
692 * which has a HyperTransport capability matching @ht_cap.
693 */
694 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
695 {
696 u8 pos;
697
698 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
699 if (pos)
700 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
701
702 return pos;
703 }
704 EXPORT_SYMBOL_GPL(pci_find_ht_capability);
705
706 /**
707 * pci_find_vsec_capability - Find a vendor-specific extended capability
708 * @dev: PCI device to query
709 * @vendor: Vendor ID for which capability is defined
710 * @cap: Vendor-specific capability ID
711 *
712 * If @dev has Vendor ID @vendor, search for a VSEC capability with
713 * VSEC ID @cap. If found, return the capability offset in
714 * config space; otherwise return 0.
715 */
716 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
717 {
718 u16 vsec = 0;
719 u32 header;
720 int ret;
721
722 if (vendor != dev->vendor)
723 return 0;
724
725 while ((vsec = pci_find_next_ext_capability(dev, vsec,
726 PCI_EXT_CAP_ID_VNDR))) {
727 ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
728 if (ret != PCIBIOS_SUCCESSFUL)
729 continue;
730
731 if (PCI_VNDR_HEADER_ID(header) == cap)
732 return vsec;
733 }
734
735 return 0;
736 }
737 EXPORT_SYMBOL_GPL(pci_find_vsec_capability);
738
739 /**
740 * pci_find_dvsec_capability - Find DVSEC for vendor
741 * @dev: PCI device to query
742 * @vendor: Vendor ID to match for the DVSEC
743 * @dvsec: Designated Vendor-specific capability ID
744 *
745 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
746 * offset in config space; otherwise return 0.
747 */
748 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
749 {
750 int pos;
751
752 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
753 if (!pos)
754 return 0;
755
756 while (pos) {
757 u16 v, id;
758
759 pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
760 pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
761 if (vendor == v && dvsec == id)
762 return pos;
763
764 pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
765 }
766
767 return 0;
768 }
769 EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
770
771 /**
772 * pci_find_parent_resource - return resource region of parent bus of given
773 * region
774 * @dev: PCI device structure contains resources to be searched
775 * @res: child resource record for which parent is sought
776 *
777 * For given resource region of given device, return the resource region of
778 * parent bus the given region is contained in.
779 */
780 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
781 struct resource *res)
782 {
783 const struct pci_bus *bus = dev->bus;
784 struct resource *r;
785 int i;
786
787 pci_bus_for_each_resource(bus, r, i) {
788 if (!r)
789 continue;
790 if (resource_contains(r, res)) {
791
792 /*
793 * If the window is prefetchable but the BAR is
794 * not, the allocator made a mistake.
795 */
796 if (r->flags & IORESOURCE_PREFETCH &&
797 !(res->flags & IORESOURCE_PREFETCH))
798 return NULL;
799
800 /*
801 * If we're below a transparent bridge, there may
802 * be both a positively-decoded aperture and a
803 * subtractively-decoded region that contain the BAR.
804 * We want the positively-decoded one, so this depends
805 * on pci_bus_for_each_resource() giving us those
806 * first.
807 */
808 return r;
809 }
810 }
811 return NULL;
812 }
813 EXPORT_SYMBOL(pci_find_parent_resource);
814
815 /**
816 * pci_find_resource - Return matching PCI device resource
817 * @dev: PCI device to query
818 * @res: Resource to look for
819 *
820 * Goes over standard PCI resources (BARs) and checks if the given resource
821 * is partially or fully contained in any of them. In that case the
822 * matching resource is returned, %NULL otherwise.
823 */
824 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
825 {
826 int i;
827
828 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
829 struct resource *r = &dev->resource[i];
830
831 if (r->start && resource_contains(r, res))
832 return r;
833 }
834
835 return NULL;
836 }
837 EXPORT_SYMBOL(pci_find_resource);
838
839 /**
840 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
841 * @dev: the PCI device to operate on
842 * @pos: config space offset of status word
843 * @mask: mask of bit(s) to care about in status word
844 *
845 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
846 */
847 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
848 {
849 int i;
850
851 /* Wait for the Transaction Pending bit to clear */
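/* Polls up to four times, backing off 100, 200, then 400 ms (~700 ms total) */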
852 for (i = 0; i < 4; i++) {
853 u16 status;
854 if (i)
855 msleep((1 << (i - 1)) * 100);
856
857 pci_read_config_word(dev, pos, &status);
858 if (!(status & mask))
859 return 1;
860 }
861
862 return 0;
863 }
864
865 static int pci_acs_enable;
866
867 /**
868 * pci_request_acs - ask for ACS to be enabled if supported
869 */
870 void pci_request_acs(void)
871 {
872 pci_acs_enable = 1;
873 }
874
875 static const char *disable_acs_redir_param;
876
877 /**
878 * pci_disable_acs_redir - disable ACS redirect capabilities
879 * @dev: the PCI device
880 *
881 * For only devices specified in the disable_acs_redir parameter.
882 */
883 static void pci_disable_acs_redir(struct pci_dev *dev)
884 {
885 int ret = 0;
886 const char *p;
887 int pos;
888 u16 ctrl;
889
890 if (!disable_acs_redir_param)
891 return;
892
893 p = disable_acs_redir_param;
894 while (*p) {
895 ret = pci_dev_str_match(dev, p, &p);
896 if (ret < 0) {
897 pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
898 disable_acs_redir_param);
899
900 break;
901 } else if (ret == 1) {
902 /* Found a match */
903 break;
904 }
905
906 if (*p != ';' && *p != ',') {
907 /* End of param or invalid format */
908 break;
909 }
910 p++;
911 }
912
913 if (ret != 1)
914 return;
915
916 if (!pci_dev_specific_disable_acs_redir(dev))
917 return;
918
919 pos = dev->acs_cap;
920 if (!pos) {
921 pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
922 return;
923 }
924
925 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
926
927 /* P2P Request & Completion Redirect */
928 ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
929
930 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
931
932 pci_info(dev, "disabled ACS redirect\n");
933 }
934
935 /**
936 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
937 * @dev: the PCI device
938 */
939 static void pci_std_enable_acs(struct pci_dev *dev)
940 {
941 int pos;
942 u16 cap;
943 u16 ctrl;
944
945 pos = dev->acs_cap;
946 if (!pos)
947 return;
948
949 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
950 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
951
952 /* Source Validation */
953 ctrl |= (cap & PCI_ACS_SV);
954
955 /* P2P Request Redirect */
956 ctrl |= (cap & PCI_ACS_RR);
957
958 /* P2P Completion Redirect */
959 ctrl |= (cap & PCI_ACS_CR);
960
961 /* Upstream Forwarding */
962 ctrl |= (cap & PCI_ACS_UF);
963
964 /* Enable Translation Blocking for external devices and noats */
965 if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
966 ctrl |= (cap & PCI_ACS_TB);
967
968 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
969 }
970
971 /**
972 * pci_enable_acs - enable ACS if hardware support it
973 * @dev: the PCI device
974 */
975 static void pci_enable_acs(struct pci_dev *dev)
976 {
977 if (!pci_acs_enable)
978 goto disable_acs_redir;
979
980 if (!pci_dev_specific_enable_acs(dev))
981 goto disable_acs_redir;
982
983 pci_std_enable_acs(dev);
984
985 disable_acs_redir:
986 /*
987 * Note: pci_disable_acs_redir() must be called even if ACS was not
988 * enabled by the kernel because it may have been enabled by
989 * platform firmware. So if we are told to disable it, we should
990 * always disable it after setting the kernel's default
991 * preferences.
992 */
993 pci_disable_acs_redir(dev);
994 }
995
996 /**
997 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
998 * @dev: PCI device to have its BARs restored
999 *
1000 * Restore the BAR values for a given device, so as to make it
1001 * accessible by its driver.
1002 */
1003 static void pci_restore_bars(struct pci_dev *dev)
1004 {
1005 int i;
1006
1007 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
1008 pci_update_resource(dev, i);
1009 }
1010
1011 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
1012 {
1013 if (pci_use_mid_pm())
1014 return true;
1015
1016 return acpi_pci_power_manageable(dev);
1017 }
1018
1019 static inline int platform_pci_set_power_state(struct pci_dev *dev,
1020 pci_power_t t)
1021 {
1022 if (pci_use_mid_pm())
1023 return mid_pci_set_power_state(dev, t);
1024
1025 return acpi_pci_set_power_state(dev, t);
1026 }
1027
1028 static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
1029 {
1030 if (pci_use_mid_pm())
1031 return mid_pci_get_power_state(dev);
1032
1033 return acpi_pci_get_power_state(dev);
1034 }
1035
1036 static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
1037 {
1038 if (!pci_use_mid_pm())
1039 acpi_pci_refresh_power_state(dev);
1040 }
1041
1042 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
1043 {
1044 if (pci_use_mid_pm())
1045 return PCI_POWER_ERROR;
1046
1047 return acpi_pci_choose_state(dev);
1048 }
1049
1050 static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
1051 {
1052 if (pci_use_mid_pm())
1053 return PCI_POWER_ERROR;
1054
1055 return acpi_pci_wakeup(dev, enable);
1056 }
1057
1058 static inline bool platform_pci_need_resume(struct pci_dev *dev)
1059 {
1060 if (pci_use_mid_pm())
1061 return false;
1062
1063 return acpi_pci_need_resume(dev);
1064 }
1065
1066 static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
1067 {
1068 if (pci_use_mid_pm())
1069 return false;
1070
1071 return acpi_pci_bridge_d3(dev);
1072 }
1073
1074 /**
1075 * pci_update_current_state - Read power state of given device and cache it
1076 * @dev: PCI device to handle.
1077 * @state: State to cache in case the device doesn't have the PM capability
1078 *
1079 * The power state is read from the PMCSR register, which however is
1080 * inaccessible in D3cold. The platform firmware is therefore queried first
1081 * to detect accessibility of the register. In case the platform firmware
1082 * reports an incorrect state or the device isn't power manageable by the
1083 * platform at all, we try to detect D3cold by testing accessibility of the
1084 * vendor ID in config space.
1085 */
1086 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
1087 {
1088 if (platform_pci_get_power_state(dev) == PCI_D3cold) {
1089 dev->current_state = PCI_D3cold;
1090 } else if (dev->pm_cap) {
1091 u16 pmcsr;
1092
1093 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1094 if (PCI_POSSIBLE_ERROR(pmcsr)) {
1095 dev->current_state = PCI_D3cold;
1096 return;
1097 }
1098 dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1099 } else {
1100 dev->current_state = state;
1101 }
1102 }
1103
1104 /**
1105 * pci_refresh_power_state - Refresh the given device's power state data
1106 * @dev: Target PCI device.
1107 *
1108 * Ask the platform to refresh the device's power state information and invoke
1109 * pci_update_current_state() to update its current PCI power state.
1110 */
1111 void pci_refresh_power_state(struct pci_dev *dev)
1112 {
1113 platform_pci_refresh_power_state(dev);
1114 pci_update_current_state(dev, dev->current_state);
1115 }
1116
1117 /**
1118 * pci_platform_power_transition - Use platform to change device power state
1119 * @dev: PCI device to handle.
1120 * @state: State to put the device into.
1121 */
1122 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
1123 {
1124 int error;
1125
1126 error = platform_pci_set_power_state(dev, state);
1127 if (!error)
1128 pci_update_current_state(dev, state);
1129 else if (!dev->pm_cap) /* Fall back to PCI_D0 */
1130 dev->current_state = PCI_D0;
1131
1132 return error;
1133 }
1134 EXPORT_SYMBOL_GPL(pci_platform_power_transition);
1135
1136 static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
1137 {
1138 pm_request_resume(&pci_dev->dev);
1139 return 0;
1140 }
1141
1142 /**
1143 * pci_resume_bus - Walk given bus and runtime resume devices on it
1144 * @bus: Top bus of the subtree to walk.
1145 */
1146 void pci_resume_bus(struct pci_bus *bus)
1147 {
1148 if (bus)
1149 pci_walk_bus(bus, pci_resume_one, NULL);
1150 }
1151
1152 static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
1153 {
1154 int delay = 1;
1155 u32 id;
1156
1157 /*
1158 * After reset, the device should not silently discard config
1159 * requests, but it may still indicate that it needs more time by
1160 * responding to them with CRS completions. The Root Port will
1161 * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
1162 * the read (except when CRS SV is enabled and the read was for the
1163 * Vendor ID; in that case it synthesizes 0x0001 data).
1164 *
1165 * Wait for the device to return a non-CRS completion. Read the
1166 * Command register instead of Vendor ID so we don't have to
1167 * contend with the CRS SV value.
1168 */
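/*
 * The delay below doubles each iteration (1, 2, 4, ... ms), so the total
 * time spent waiting before giving up is roughly twice @timeout.
 */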
1169 pci_read_config_dword(dev, PCI_COMMAND, &id);
1170 while (PCI_POSSIBLE_ERROR(id)) {
1171 if (delay > timeout) {
1172 pci_warn(dev, "not ready %dms after %s; giving up\n",
1173 delay - 1, reset_type);
1174 return -ENOTTY;
1175 }
1176
1177 if (delay > PCI_RESET_WAIT)
1178 pci_info(dev, "not ready %dms after %s; waiting\n",
1179 delay - 1, reset_type);
1180
1181 msleep(delay);
1182 delay *= 2;
1183 pci_read_config_dword(dev, PCI_COMMAND, &id);
1184 }
1185
1186 if (delay > PCI_RESET_WAIT)
1187 pci_info(dev, "ready %dms after %s\n", delay - 1,
1188 reset_type);
1189
1190 return 0;
1191 }
1192
1193 /**
1194 * pci_power_up - Put the given device into D0
1195 * @dev: PCI device to power up
1196 *
1197 * On success, return 0 or 1, depending on whether or not it is necessary to
1198 * restore the device's BARs subsequently (1 is returned in that case).
1199 *
1200 * On failure, return a negative error code. Always return failure if @dev
1201 * lacks a Power Management Capability, even if the platform was able to
1202 * put the device in D0 via non-PCI means.
1203 */
1204 int pci_power_up(struct pci_dev *dev)
1205 {
1206 bool need_restore;
1207 pci_power_t state;
1208 u16 pmcsr;
1209
1210 platform_pci_set_power_state(dev, PCI_D0);
1211
1212 if (!dev->pm_cap) {
1213 state = platform_pci_get_power_state(dev);
1214 if (state == PCI_UNKNOWN)
1215 dev->current_state = PCI_D0;
1216 else
1217 dev->current_state = state;
1218
1219 return -EIO;
1220 }
1221
1222 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1223 if (PCI_POSSIBLE_ERROR(pmcsr)) {
1224 pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
1225 pci_power_name(dev->current_state));
1226 dev->current_state = PCI_D3cold;
1227 return -EIO;
1228 }
1229
1230 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1231
1232 need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
1233 !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
1234
1235 if (state == PCI_D0)
1236 goto end;
1237
1238 /*
1239 * Force the entire word to 0. This doesn't affect PME_Status, disables
1240 * PME_En, and sets PowerState to 0.
1241 */
1242 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);
1243
1244 /* Mandatory transition delays; see PCI PM 1.2. */
1245 if (state == PCI_D3hot)
1246 pci_dev_d3_sleep(dev);
1247 else if (state == PCI_D2)
1248 udelay(PCI_PM_D2_DELAY);
1249
1250 end:
1251 dev->current_state = PCI_D0;
1252 if (need_restore)
1253 return 1;
1254
1255 return 0;
1256 }
1257
1258 /**
1259 * pci_set_full_power_state - Put a PCI device into D0 and update its state
1260 * @dev: PCI device to power up
1261 *
1262 * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
1263 * to confirm the state change, restore its BARs if they might be lost and
1264 * reconfigure ASPM in accordance with the new power state.
1265 *
1266 * If pci_restore_state() is going to be called right after a power state change
1267 * to D0, it is more efficient to use pci_power_up() directly instead of this
1268 * function.
1269 */
1270 static int pci_set_full_power_state(struct pci_dev *dev)
1271 {
1272 u16 pmcsr;
1273 int ret;
1274
1275 ret = pci_power_up(dev);
1276 if (ret < 0) {
1277 if (dev->current_state == PCI_D0)
1278 return 0;
1279
1280 return ret;
1281 }
1282
1283 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1284 dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1285 if (dev->current_state != PCI_D0) {
1286 pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
1287 pci_power_name(dev->current_state));
1288 } else if (ret > 0) {
1289 /*
1290 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
1291 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
1292 * from D3hot to D0 _may_ perform an internal reset, thereby
1293 * going to "D0 Uninitialized" rather than "D0 Initialized".
1294 * For example, at least some versions of the 3c905B and the
1295 * 3c556B exhibit this behaviour.
1296 *
1297 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
1298 * devices in a D3hot state at boot. Consequently, we need to
1299 * restore at least the BARs so that the device will be
1300 * accessible to its driver.
1301 */
1302 pci_restore_bars(dev);
1303 }
1304
1305 if (dev->bus->self)
1306 pcie_aspm_pm_state_change(dev->bus->self);
1307
1308 return 0;
1309 }
1310
1311 /**
1312 * __pci_dev_set_current_state - Set current state of a PCI device
1313 * @dev: Device to handle
1314 * @data: pointer to state to be set
1315 */
1316 static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
1317 {
1318 pci_power_t state = *(pci_power_t *)data;
1319
1320 dev->current_state = state;
1321 return 0;
1322 }
1323
1324 /**
1325 * pci_bus_set_current_state - Walk given bus and set current state of devices
1326 * @bus: Top bus of the subtree to walk.
1327 * @state: state to be set
1328 */
1329 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
1330 {
1331 if (bus)
1332 pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1333 }
1334
1335 /**
1336 * pci_set_low_power_state - Put a PCI device into a low-power state.
1337 * @dev: PCI device to handle.
1338 * @state: PCI power state (D1, D2, D3hot) to put the device into.
1339 *
1340 * Use the device's PCI_PM_CTRL register to put it into a low-power state.
1341 *
1342 * RETURN VALUE:
1343 * -EINVAL if the requested state is invalid.
1344 * -EIO if device does not support PCI PM or its PM capabilities register has a
1345 * wrong version, or device doesn't support the requested state.
1346 * 0 if device already is in the requested state.
1347 * 0 if device's power state has been successfully changed.
1348 */
1349 static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
1350 {
1351 u16 pmcsr;
1352
1353 if (!dev->pm_cap)
1354 return -EIO;
1355
1356 /*
1357 * Validate transition: We can enter D0 from any state, but if
1358 * we're already in a low-power state, we can only go deeper. E.g.,
1359 * we can go from D1 to D3, but we can't go directly from D3 to D1;
1360 * we'd have to go from D3 to D0, then to D1.
1361 */
1362 if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
1363 pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
1364 pci_power_name(dev->current_state),
1365 pci_power_name(state));
1366 return -EINVAL;
1367 }
1368
1369 /* Check if this device supports the desired state */
1370 if ((state == PCI_D1 && !dev->d1_support)
1371 || (state == PCI_D2 && !dev->d2_support))
1372 return -EIO;
1373
1374 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1375 if (PCI_POSSIBLE_ERROR(pmcsr)) {
1376 pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
1377 pci_power_name(dev->current_state),
1378 pci_power_name(state));
1379 dev->current_state = PCI_D3cold;
1380 return -EIO;
1381 }
1382
1383 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1384 pmcsr |= state;
1385
1386 /* Enter specified state */
1387 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1388
1389 /* Mandatory power management transition delays; see PCI PM 1.2. */
1390 if (state == PCI_D3hot)
1391 pci_dev_d3_sleep(dev);
1392 else if (state == PCI_D2)
1393 udelay(PCI_PM_D2_DELAY);
1394
1395 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1396 dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1397 if (dev->current_state != state)
1398 pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
1399 pci_power_name(dev->current_state),
1400 pci_power_name(state));
1401
1402 if (dev->bus->self)
1403 pcie_aspm_pm_state_change(dev->bus->self);
1404
1405 return 0;
1406 }
1407
1408 /**
1409 * pci_set_power_state - Set the power state of a PCI device
1410 * @dev: PCI device to handle.
1411 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1412 *
1413 * Transition a device to a new power state, using the platform firmware and/or
1414 * the device's PCI PM registers.
1415 *
1416 * RETURN VALUE:
1417 * -EINVAL if the requested state is invalid.
1418 * -EIO if device does not support PCI PM or its PM capabilities register has a
1419 * wrong version, or device doesn't support the requested state.
1420 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
1421 * 0 if device already is in the requested state.
1422 * 0 if the transition is to D3 but D3 is not supported.
1423 * 0 if device's power state has been successfully changed.
1424 */
1425 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1426 {
1427 int error;
1428
1429 /* Bound the state we're entering */
1430 if (state > PCI_D3cold)
1431 state = PCI_D3cold;
1432 else if (state < PCI_D0)
1433 state = PCI_D0;
1434 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
1435
1436 /*
1437 * If the device or the parent bridge do not support PCI
1438 * PM, ignore the request if we're doing anything other
1439 * than putting it into D0 (which would only happen on
1440 * boot).
1441 */
1442 return 0;
1443
1444 /* Check if we're already there */
1445 if (dev->current_state == state)
1446 return 0;
1447
1448 if (state == PCI_D0)
1449 return pci_set_full_power_state(dev);
1450
1451 /*
1452 * This device is quirked not to be put into D3, so don't put it in
1453 * D3
1454 */
1455 if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
1456 return 0;
1457
1458 if (state == PCI_D3cold) {
1459 /*
1460 * To put the device in D3cold, put it into D3hot in the native
1461 * way, then put it into D3cold using platform ops.
1462 */
1463 error = pci_set_low_power_state(dev, PCI_D3hot);
1464
1465 if (pci_platform_power_transition(dev, PCI_D3cold))
1466 return error;
1467
1468 /* Powering off a bridge may power off the whole hierarchy */
1469 if (dev->current_state == PCI_D3cold)
1470 pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
1471 } else {
1472 error = pci_set_low_power_state(dev, state);
1473
1474 if (pci_platform_power_transition(dev, state))
1475 return error;
1476 }
1477
1478 return 0;
1479 }
1480 EXPORT_SYMBOL(pci_set_power_state);
1481
1482 #define PCI_EXP_SAVE_REGS 7
1483
1484 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1485 u16 cap, bool extended)
1486 {
1487 struct pci_cap_saved_state *tmp;
1488
1489 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1490 if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1491 return tmp;
1492 }
1493 return NULL;
1494 }
1495
1496 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1497 {
1498 return _pci_find_saved_cap(dev, cap, false);
1499 }
1500
1501 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1502 {
1503 return _pci_find_saved_cap(dev, cap, true);
1504 }
1505
1506 static int pci_save_pcie_state(struct pci_dev *dev)
1507 {
1508 int i = 0;
1509 struct pci_cap_saved_state *save_state;
1510 u16 *cap;
1511
1512 if (!pci_is_pcie(dev))
1513 return 0;
1514
1515 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1516 if (!save_state) {
1517 pci_err(dev, "buffer not found in %s\n", __func__);
1518 return -ENOMEM;
1519 }
1520
1521 cap = (u16 *)&save_state->cap.data[0];
1522 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1523 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1524 pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1525 pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
1526 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1527 pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1528 pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1529
1530 return 0;
1531 }
1532
1533 void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
1534 {
1535 #ifdef CONFIG_PCIEASPM
1536 struct pci_dev *bridge;
1537 u32 ctl;
1538
1539 bridge = pci_upstream_bridge(dev);
1540 if (bridge && bridge->ltr_path) {
1541 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
1542 if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
1543 pci_dbg(bridge, "re-enabling LTR\n");
1544 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
1545 PCI_EXP_DEVCTL2_LTR_EN);
1546 }
1547 }
1548 #endif
1549 }
1550
1551 static void pci_restore_pcie_state(struct pci_dev *dev)
1552 {
1553 int i = 0;
1554 struct pci_cap_saved_state *save_state;
1555 u16 *cap;
1556
1557 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1558 if (!save_state)
1559 return;
1560
1561 /*
1562 * Downstream ports reset the LTR enable bit when link goes down.
1563 * Check and re-configure the bit here before restoring device.
1564 * PCIe r5.0, sec 7.5.3.16.
1565 */
1566 pci_bridge_reconfigure_ltr(dev);
1567
1568 cap = (u16 *)&save_state->cap.data[0];
1569 pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1570 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1571 pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1572 pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1573 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1574 pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1575 pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1576 }
1577
1578 static int pci_save_pcix_state(struct pci_dev *dev)
1579 {
1580 int pos;
1581 struct pci_cap_saved_state *save_state;
1582
1583 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1584 if (!pos)
1585 return 0;
1586
1587 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1588 if (!save_state) {
1589 pci_err(dev, "buffer not found in %s\n", __func__);
1590 return -ENOMEM;
1591 }
1592
1593 pci_read_config_word(dev, pos + PCI_X_CMD,
1594 (u16 *)save_state->cap.data);
1595
1596 return 0;
1597 }
1598
1599 static void pci_restore_pcix_state(struct pci_dev *dev)
1600 {
1601 int i = 0, pos;
1602 struct pci_cap_saved_state *save_state;
1603 u16 *cap;
1604
1605 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1606 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1607 if (!save_state || !pos)
1608 return;
1609 cap = (u16 *)&save_state->cap.data[0];
1610
1611 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1612 }
1613
1614 static void pci_save_ltr_state(struct pci_dev *dev)
1615 {
1616 int ltr;
1617 struct pci_cap_saved_state *save_state;
1618 u32 *cap;
1619
1620 if (!pci_is_pcie(dev))
1621 return;
1622
1623 ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1624 if (!ltr)
1625 return;
1626
1627 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1628 if (!save_state) {
1629 pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
1630 return;
1631 }
1632
1633 /* Some broken devices only support dword access to LTR */
1634 cap = &save_state->cap.data[0];
1635 pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
1636 }
1637
1638 static void pci_restore_ltr_state(struct pci_dev *dev)
1639 {
1640 struct pci_cap_saved_state *save_state;
1641 int ltr;
1642 u32 *cap;
1643
1644 save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1645 ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1646 if (!save_state || !ltr)
1647 return;
1648
1649 /* Some broken devices only support dword access to LTR */
1650 cap = &save_state->cap.data[0];
1651 pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
1652 }
1653
1654 /**
1655 * pci_save_state - save the PCI configuration space of a device before
1656 * suspending
1657 * @dev: PCI device that we're dealing with
1658 */
1659 int pci_save_state(struct pci_dev *dev)
1660 {
1661 int i;
1662 /* XXX: 100% dword access ok here? */
1663 for (i = 0; i < 16; i++) {
1664 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1665 pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
1666 i * 4, dev->saved_config_space[i]);
1667 }
1668 dev->state_saved = true;
1669
1670 i = pci_save_pcie_state(dev);
1671 if (i != 0)
1672 return i;
1673
1674 i = pci_save_pcix_state(dev);
1675 if (i != 0)
1676 return i;
1677
1678 pci_save_ltr_state(dev);
1679 pci_save_dpc_state(dev);
1680 pci_save_aer_state(dev);
1681 pci_save_ptm_state(dev);
1682 return pci_save_vc_state(dev);
1683 }
1684 EXPORT_SYMBOL(pci_save_state);
1685
1686 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1687 u32 saved_val, int retry, bool force)
1688 {
1689 u32 val;
1690
1691 pci_read_config_dword(pdev, offset, &val);
1692 if (!force && val == saved_val)
1693 return;
1694
1695 for (;;) {
1696 pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1697 offset, val, saved_val);
1698 pci_write_config_dword(pdev, offset, saved_val);
1699 if (retry-- <= 0)
1700 return;
1701
1702 pci_read_config_dword(pdev, offset, &val);
1703 if (val == saved_val)
1704 return;
1705
1706 mdelay(1);
1707 }
1708 }
1709
1710 static void pci_restore_config_space_range(struct pci_dev *pdev,
1711 int start, int end, int retry,
1712 bool force)
1713 {
1714 int index;
1715
1716 for (index = end; index >= start; index--)
1717 pci_restore_config_dword(pdev, 4 * index,
1718 pdev->saved_config_space[index],
1719 retry, force);
1720 }
1721
1722 static void pci_restore_config_space(struct pci_dev *pdev)
1723 {
1724 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1725 pci_restore_config_space_range(pdev, 10, 15, 0, false);
1726 /* Restore BARs before the command register. */
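/* (Dwords 4-9 are config offsets 0x10-0x27, i.e. the six type 0 BARs.) */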
1727 pci_restore_config_space_range(pdev, 4, 9, 10, false);
1728 pci_restore_config_space_range(pdev, 0, 3, 0, false);
1729 } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1730 pci_restore_config_space_range(pdev, 12, 15, 0, false);
1731
1732 /*
1733 * Force rewriting of prefetch registers to avoid S3 resume
1734 * issues on Intel PCI bridges that occur when these
1735 * registers are not explicitly written.
1736 */
1737 pci_restore_config_space_range(pdev, 9, 11, 0, true);
1738 pci_restore_config_space_range(pdev, 0, 8, 0, false);
1739 } else {
1740 pci_restore_config_space_range(pdev, 0, 15, 0, false);
1741 }
1742 }
1743
1744 static void pci_restore_rebar_state(struct pci_dev *pdev)
1745 {
1746 unsigned int pos, nbars, i;
1747 u32 ctrl;
1748
1749 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1750 if (!pos)
1751 return;
1752
1753 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1754 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
1755 PCI_REBAR_CTRL_NBAR_SHIFT;
1756
1757 for (i = 0; i < nbars; i++, pos += 8) {
1758 struct resource *res;
1759 int bar_idx, size;
1760
1761 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1762 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1763 res = pdev->resource + bar_idx;
1764 size = pci_rebar_bytes_to_size(resource_size(res));
1765 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1766 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
1767 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1768 }
1769 }
1770
1771 /**
1772 * pci_restore_state - Restore the saved state of a PCI device
1773 * @dev: PCI device that we're dealing with
1774 */
1775 void pci_restore_state(struct pci_dev *dev)
1776 {
1777 if (!dev->state_saved)
1778 return;
1779
1780 /*
1781 * Restore max latencies (in the LTR capability) before enabling
1782 * LTR itself (in the PCIe capability).
1783 */
1784 pci_restore_ltr_state(dev);
1785
1786 pci_restore_pcie_state(dev);
1787 pci_restore_pasid_state(dev);
1788 pci_restore_pri_state(dev);
1789 pci_restore_ats_state(dev);
1790 pci_restore_vc_state(dev);
1791 pci_restore_rebar_state(dev);
1792 pci_restore_dpc_state(dev);
1793 pci_restore_ptm_state(dev);
1794
1795 pci_aer_clear_status(dev);
1796 pci_restore_aer_state(dev);
1797
1798 pci_restore_config_space(dev);
1799
1800 pci_restore_pcix_state(dev);
1801 pci_restore_msi_state(dev);
1802
1803 /* Restore ACS and IOV configuration state */
1804 pci_enable_acs(dev);
1805 pci_restore_iov_state(dev);
1806
1807 dev->state_saved = false;
1808 }
1809 EXPORT_SYMBOL(pci_restore_state);
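/*
 * Illustrative sketch, not part of this file: how a driver's system-sleep
 * callbacks might pair pci_save_state()/pci_restore_state(). The mydrv_*
 * helpers are hypothetical; most drivers now let the PCI core perform these
 * steps from their dev_pm_ops instead of doing it by hand.
 *
 *	static int __maybe_unused mydrv_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, PCI_D3hot);
 *		return 0;
 *	}
 *
 *	static int __maybe_unused mydrv_resume(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */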
1810
1811 struct pci_saved_state {
1812 u32 config_space[16];
1813 struct pci_cap_saved_data cap[];
1814 };
1815
1816 /**
1817 * pci_store_saved_state - Allocate and return an opaque struct containing
1818 * the device saved state.
1819 * @dev: PCI device that we're dealing with
1820 *
1821 * Return NULL if the device state has not been saved or if memory allocation fails.
1822 */
1823 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1824 {
1825 struct pci_saved_state *state;
1826 struct pci_cap_saved_state *tmp;
1827 struct pci_cap_saved_data *cap;
1828 size_t size;
1829
1830 if (!dev->state_saved)
1831 return NULL;
1832
1833 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1834
1835 hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1836 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1837
1838 state = kzalloc(size, GFP_KERNEL);
1839 if (!state)
1840 return NULL;
1841
1842 memcpy(state->config_space, dev->saved_config_space,
1843 sizeof(state->config_space));
1844
1845 cap = state->cap;
1846 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1847 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1848 memcpy(cap, &tmp->cap, len);
1849 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1850 }
1851 /* Empty cap_save terminates list */
1852
1853 return state;
1854 }
1855 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1856
1857 /**
1858 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1859 * @dev: PCI device that we're dealing with
1860 * @state: Saved state returned from pci_store_saved_state()
1861 */
1862 int pci_load_saved_state(struct pci_dev *dev,
1863 struct pci_saved_state *state)
1864 {
1865 struct pci_cap_saved_data *cap;
1866
1867 dev->state_saved = false;
1868
1869 if (!state)
1870 return 0;
1871
1872 memcpy(dev->saved_config_space, state->config_space,
1873 sizeof(state->config_space));
1874
1875 cap = state->cap;
1876 while (cap->size) {
1877 struct pci_cap_saved_state *tmp;
1878
1879 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1880 if (!tmp || tmp->cap.size != cap->size)
1881 return -EINVAL;
1882
1883 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1884 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1885 sizeof(struct pci_cap_saved_data) + cap->size);
1886 }
1887
1888 dev->state_saved = true;
1889 return 0;
1890 }
1891 EXPORT_SYMBOL_GPL(pci_load_saved_state);
1892
1893 /**
1894 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1895 * and free the memory allocated for it.
1896 * @dev: PCI device that we're dealing with
1897 * @state: Pointer to saved state returned from pci_store_saved_state()
1898 */
1899 int pci_load_and_free_saved_state(struct pci_dev *dev,
1900 struct pci_saved_state **state)
1901 {
1902 int ret = pci_load_saved_state(dev, *state);
1903 kfree(*state);
1904 *state = NULL;
1905 return ret;
1906 }
1907 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
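/*
 * Illustrative sketch, not part of this file: the store/load helpers above
 * let a caller (for example a pass-through driver) keep a snapshot alive
 * across an operation that clobbers the device, under the assumption that
 * the state was saved first with pci_save_state():
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	... reset or hand the device to a guest ...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */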
1908
1909 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1910 {
1911 return pci_enable_resources(dev, bars);
1912 }
1913
1914 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1915 {
1916 int err;
1917 struct pci_dev *bridge;
1918 u16 cmd;
1919 u8 pin;
1920
1921 err = pci_set_power_state(dev, PCI_D0);
1922 if (err < 0 && err != -EIO)
1923 return err;
1924
1925 bridge = pci_upstream_bridge(dev);
1926 if (bridge)
1927 pcie_aspm_powersave_config_link(bridge);
1928
1929 err = pcibios_enable_device(dev, bars);
1930 if (err < 0)
1931 return err;
1932 pci_fixup_device(pci_fixup_enable, dev);
1933
1934 if (dev->msi_enabled || dev->msix_enabled)
1935 return 0;
1936
1937 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1938 if (pin) {
1939 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1940 if (cmd & PCI_COMMAND_INTX_DISABLE)
1941 pci_write_config_word(dev, PCI_COMMAND,
1942 cmd & ~PCI_COMMAND_INTX_DISABLE);
1943 }
1944
1945 return 0;
1946 }
1947
1948 /**
1949 * pci_reenable_device - Resume abandoned device
1950 * @dev: PCI device to be resumed
1951 *
1952 * NOTE: This function is a backend of pci_default_resume() and is not supposed
1953 * to be called by normal code; write a proper resume handler and use it instead.
1954 */
1955 int pci_reenable_device(struct pci_dev *dev)
1956 {
1957 if (pci_is_enabled(dev))
1958 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1959 return 0;
1960 }
1961 EXPORT_SYMBOL(pci_reenable_device);
1962
1963 static void pci_enable_bridge(struct pci_dev *dev)
1964 {
1965 struct pci_dev *bridge;
1966 int retval;
1967
1968 bridge = pci_upstream_bridge(dev);
1969 if (bridge)
1970 pci_enable_bridge(bridge);
1971
1972 if (pci_is_enabled(dev)) {
1973 if (!dev->is_busmaster)
1974 pci_set_master(dev);
1975 return;
1976 }
1977
1978 retval = pci_enable_device(dev);
1979 if (retval)
1980 pci_err(dev, "Error enabling bridge (%d), continuing\n",
1981 retval);
1982 pci_set_master(dev);
1983 }
1984
1985 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1986 {
1987 struct pci_dev *bridge;
1988 int err;
1989 int i, bars = 0;
1990
1991 /*
1992 * Power state could be unknown at this point, either due to a fresh
1993 * boot or a device removal call. So get the current power state
1994 * so that things like MSI message writing will behave as expected
1995 * (e.g. if the device really is in D0 at enable time).
1996 */
1997 pci_update_current_state(dev, dev->current_state);
1998
1999 if (atomic_inc_return(&dev->enable_cnt) > 1)
2000 return 0; /* already enabled */
2001
2002 bridge = pci_upstream_bridge(dev);
2003 if (bridge)
2004 pci_enable_bridge(bridge);
2005
2006 /* Enable every resource that matches @flags, skipping only the SR-IOV ones */
2007 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
2008 if (dev->resource[i].flags & flags)
2009 bars |= (1 << i);
2010 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
2011 if (dev->resource[i].flags & flags)
2012 bars |= (1 << i);
2013
2014 err = do_pci_enable_device(dev, bars);
2015 if (err < 0)
2016 atomic_dec(&dev->enable_cnt);
2017 return err;
2018 }
2019
2020 /**
2021 * pci_enable_device_io - Initialize a device for use with IO space
2022 * @dev: PCI device to be initialized
2023 *
2024 * Initialize device before it's used by a driver. Ask low-level code
2025 * to enable I/O resources. Wake up the device if it was suspended.
2026 * Beware, this function can fail.
2027 */
2028 int pci_enable_device_io(struct pci_dev *dev)
2029 {
2030 return pci_enable_device_flags(dev, IORESOURCE_IO);
2031 }
2032 EXPORT_SYMBOL(pci_enable_device_io);
2033
2034 /**
2035 * pci_enable_device_mem - Initialize a device for use with Memory space
2036 * @dev: PCI device to be initialized
2037 *
2038 * Initialize device before it's used by a driver. Ask low-level code
2039 * to enable Memory resources. Wake up the device if it was suspended.
2040 * Beware, this function can fail.
2041 */
2042 int pci_enable_device_mem(struct pci_dev *dev)
2043 {
2044 return pci_enable_device_flags(dev, IORESOURCE_MEM);
2045 }
2046 EXPORT_SYMBOL(pci_enable_device_mem);
2047
2048 /**
2049 * pci_enable_device - Initialize device before it's used by a driver.
2050 * @dev: PCI device to be initialized
2051 *
2052 * Initialize device before it's used by a driver. Ask low-level code
2053 * to enable I/O and memory. Wake up the device if it was suspended.
2054 * Beware, this function can fail.
2055 *
2056 * Note we don't actually enable the device many times if we call
2057 * this function repeatedly (we just increment the count).
2058 */
2059 int pci_enable_device(struct pci_dev *dev)
2060 {
2061 return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2062 }
2063 EXPORT_SYMBOL(pci_enable_device);
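/*
 * Illustrative sketch, not part of this file: typical unmanaged use of
 * pci_enable_device() with a matching pci_disable_device() on the detach
 * path. The mydrv_* names are hypothetical.
 *
 *	static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		pci_set_master(pdev);
 *		...
 *		return 0;
 *	}
 *
 *	static void mydrv_remove(struct pci_dev *pdev)
 *	{
 *		...
 *		pci_disable_device(pdev);
 *	}
 */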
2064
2065 /*
2066 * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
2067 * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
2068 * there's no need to track it separately. pci_devres is initialized
2069 * when a device is enabled using managed PCI device enable interface.
2070 */
2071 struct pci_devres {
2072 unsigned int enabled:1;
2073 unsigned int pinned:1;
2074 unsigned int orig_intx:1;
2075 unsigned int restore_intx:1;
2076 unsigned int mwi:1;
2077 u32 region_mask;
2078 };
2079
2080 static void pcim_release(struct device *gendev, void *res)
2081 {
2082 struct pci_dev *dev = to_pci_dev(gendev);
2083 struct pci_devres *this = res;
2084 int i;
2085
2086 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
2087 if (this->region_mask & (1 << i))
2088 pci_release_region(dev, i);
2089
2090 if (this->mwi)
2091 pci_clear_mwi(dev);
2092
2093 if (this->restore_intx)
2094 pci_intx(dev, this->orig_intx);
2095
2096 if (this->enabled && !this->pinned)
2097 pci_disable_device(dev);
2098 }
2099
2100 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
2101 {
2102 struct pci_devres *dr, *new_dr;
2103
2104 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
2105 if (dr)
2106 return dr;
2107
2108 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
2109 if (!new_dr)
2110 return NULL;
2111 return devres_get(&pdev->dev, new_dr, NULL, NULL);
2112 }
2113
2114 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
2115 {
2116 if (pci_is_managed(pdev))
2117 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
2118 return NULL;
2119 }
2120
2121 /**
2122 * pcim_enable_device - Managed pci_enable_device()
2123 * @pdev: PCI device to be initialized
2124 *
2125 * Managed pci_enable_device().
2126 */
2127 int pcim_enable_device(struct pci_dev *pdev)
2128 {
2129 struct pci_devres *dr;
2130 int rc;
2131
2132 dr = get_pci_dr(pdev);
2133 if (unlikely(!dr))
2134 return -ENOMEM;
2135 if (dr->enabled)
2136 return 0;
2137
2138 rc = pci_enable_device(pdev);
2139 if (!rc) {
2140 pdev->is_managed = 1;
2141 dr->enabled = 1;
2142 }
2143 return rc;
2144 }
2145 EXPORT_SYMBOL(pcim_enable_device);
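/*
 * Illustrative sketch, not part of this file: with the managed variant the
 * enable is undone automatically by devres when the driver is unbound, so
 * the hypothetical mydrv_probe() below needs no matching disable in its
 * error paths or in remove().
 *
 *	static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err = pcim_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		... further managed setup ...
 *		return 0;
 *	}
 */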
2146
2147 /**
2148 * pcim_pin_device - Pin managed PCI device
2149 * @pdev: PCI device to pin
2150 *
2151 * Pin managed PCI device @pdev. Pinned device won't be disabled on
2152 * driver detach. @pdev must have been enabled with
2153 * pcim_enable_device().
2154 */
2155 void pcim_pin_device(struct pci_dev *pdev)
2156 {
2157 struct pci_devres *dr;
2158
2159 dr = find_pci_dr(pdev);
2160 WARN_ON(!dr || !dr->enabled);
2161 if (dr)
2162 dr->pinned = 1;
2163 }
2164 EXPORT_SYMBOL(pcim_pin_device);
2165
2166 /*
2167 * pcibios_device_add - provide arch specific hooks when adding device dev
2168 * @dev: the PCI device being added
2169 *
2170 * Permits the platform to provide architecture specific functionality when
2171 * devices are added. This is the default implementation. Architecture
2172 * implementations can override this.
2173 */
2174 int __weak pcibios_device_add(struct pci_dev *dev)
2175 {
2176 return 0;
2177 }
2178
2179 /**
2180 * pcibios_release_device - provide arch specific hooks when releasing
2181 * device dev
2182 * @dev: the PCI device being released
2183 *
2184 * Permits the platform to provide architecture specific functionality when
2185 * devices are released. This is the default implementation. Architecture
2186 * implementations can override this.
2187 */
2188 void __weak pcibios_release_device(struct pci_dev *dev) {}
2189
2190 /**
2191 * pcibios_disable_device - disable arch specific PCI resources for device dev
2192 * @dev: the PCI device to disable
2193 *
2194 * Disables architecture specific PCI resources for the device. This
2195 * is the default implementation. Architecture implementations can
2196 * override this.
2197 */
2198 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2199
2200 /**
2201 * pcibios_penalize_isa_irq - penalize an ISA IRQ
2202 * @irq: ISA IRQ to penalize
2203 * @active: IRQ active or not
2204 *
2205 * Permits the platform to provide architecture-specific functionality when
2206 * penalizing ISA IRQs. This is the default implementation. Architecture
2207 * implementations can override this.
2208 */
2209 void __weak pcibios_penalize_isa_irq(int irq, int active) {}
2210
2211 static void do_pci_disable_device(struct pci_dev *dev)
2212 {
2213 u16 pci_command;
2214
2215 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2216 if (pci_command & PCI_COMMAND_MASTER) {
2217 pci_command &= ~PCI_COMMAND_MASTER;
2218 pci_write_config_word(dev, PCI_COMMAND, pci_command);
2219 }
2220
2221 pcibios_disable_device(dev);
2222 }
2223
2224 /**
2225 * pci_disable_enabled_device - Disable device without updating enable_cnt
2226 * @dev: PCI device to disable
2227 *
2228 * NOTE: This function is a backend of PCI power management routines and is
2229 * not supposed to be called by drivers.
2230 */
2231 void pci_disable_enabled_device(struct pci_dev *dev)
2232 {
2233 if (pci_is_enabled(dev))
2234 do_pci_disable_device(dev);
2235 }
2236
2237 /**
2238 * pci_disable_device - Disable PCI device after use
2239 * @dev: PCI device to be disabled
2240 *
2241 * Signal to the system that the PCI device is not in use by the system
2242 * anymore. This only involves disabling PCI bus-mastering, if active.
2243 *
2244 * Note we don't actually disable the device until all callers of
2245 * pci_enable_device() have called pci_disable_device().
2246 */
2247 void pci_disable_device(struct pci_dev *dev)
2248 {
2249 struct pci_devres *dr;
2250
2251 dr = find_pci_dr(dev);
2252 if (dr)
2253 dr->enabled = 0;
2254
2255 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2256 "disabling already-disabled device");
2257
2258 if (atomic_dec_return(&dev->enable_cnt) != 0)
2259 return;
2260
2261 do_pci_disable_device(dev);
2262
2263 dev->is_busmaster = 0;
2264 }
2265 EXPORT_SYMBOL(pci_disable_device);
2266
2267 /**
2268 * pcibios_set_pcie_reset_state - set reset state for device dev
2269 * @dev: the PCIe device to reset
2270 * @state: Reset state to enter into
2271 *
2272 * Set the PCIe reset state for the device. This is the default
2273 * implementation. Architecture implementations can override this.
2274 */
2275 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2276 enum pcie_reset_state state)
2277 {
2278 return -EINVAL;
2279 }
2280
2281 /**
2282 * pci_set_pcie_reset_state - set reset state for device dev
2283 * @dev: the PCIe device to reset
2284 * @state: Reset state to enter into
2285 *
2286 * Sets the PCI reset state for the device.
2287 */
2288 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2289 {
2290 return pcibios_set_pcie_reset_state(dev, state);
2291 }
2292 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
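/*
 * Illustrative sketch, not part of this file (and only meaningful on
 * architectures that implement pcibios_set_pcie_reset_state()): a driver
 * recovering a wedged device might pulse a warm reset; the 100ms settle
 * time below is a made-up example value.
 *
 *	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
 *	msleep(100);
 *	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
 */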
2293
2294 #ifdef CONFIG_PCIEAER
2295 void pcie_clear_device_status(struct pci_dev *dev)
2296 {
2297 u16 sta;
2298
2299 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2300 pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2301 }
2302 #endif
2303
2304 /**
2305 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2306 * @dev: PCIe root port or event collector.
2307 */
2308 void pcie_clear_root_pme_status(struct pci_dev *dev)
2309 {
2310 pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2311 }
2312
2313 /**
2314 * pci_check_pme_status - Check if given device has generated PME.
2315 * @dev: Device to check.
2316 *
2317 * Check the PME status of the device and if set, clear it and clear PME enable
2318 * (if set). Return 'true' if PME status and PME enable were both set or
2319 * 'false' otherwise.
2320 */
2321 bool pci_check_pme_status(struct pci_dev *dev)
2322 {
2323 int pmcsr_pos;
2324 u16 pmcsr;
2325 bool ret = false;
2326
2327 if (!dev->pm_cap)
2328 return false;
2329
2330 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2331 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2332 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2333 return false;
2334
2335 /* Clear PME status. */
2336 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2337 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2338 /* Disable PME to avoid interrupt flood. */
2339 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2340 ret = true;
2341 }
2342
2343 pci_write_config_word(dev, pmcsr_pos, pmcsr);
2344
2345 return ret;
2346 }
2347
2348 /**
2349 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2350 * @dev: Device to handle.
2351 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2352 *
2353 * Check if @dev has generated PME and queue a resume request for it in that
2354 * case.
2355 */
2356 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2357 {
2358 if (pme_poll_reset && dev->pme_poll)
2359 dev->pme_poll = false;
2360
2361 if (pci_check_pme_status(dev)) {
2362 pci_wakeup_event(dev);
2363 pm_request_resume(&dev->dev);
2364 }
2365 return 0;
2366 }
2367
2368 /**
2369 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2370 * @bus: Top bus of the subtree to walk.
2371 */
2372 void pci_pme_wakeup_bus(struct pci_bus *bus)
2373 {
2374 if (bus)
2375 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2376 }
2377
2378
2379 /**
2380 * pci_pme_capable - check the capability of PCI device to generate PME#
2381 * @dev: PCI device to handle.
2382 * @state: PCI state from which device will issue PME#.
2383 */
2384 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2385 {
2386 if (!dev->pm_cap)
2387 return false;
2388
2389 return !!(dev->pme_support & (1 << state));
2390 }
2391 EXPORT_SYMBOL(pci_pme_capable);
2392
2393 static void pci_pme_list_scan(struct work_struct *work)
2394 {
2395 struct pci_pme_device *pme_dev, *n;
2396
2397 mutex_lock(&pci_pme_list_mutex);
2398 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2399 if (pme_dev->dev->pme_poll) {
2400 struct pci_dev *bridge;
2401
2402 bridge = pme_dev->dev->bus->self;
2403 /*
2404 * If the bridge is in a low-power state, the
2405 * configuration space of subordinate devices
2406 * may not be accessible
2407 */
2408 if (bridge && bridge->current_state != PCI_D0)
2409 continue;
2410 /*
2411 * If the device is in D3cold it should not be
2412 * polled either.
2413 */
2414 if (pme_dev->dev->current_state == PCI_D3cold)
2415 continue;
2416
2417 pci_pme_wakeup(pme_dev->dev, NULL);
2418 } else {
2419 list_del(&pme_dev->list);
2420 kfree(pme_dev);
2421 }
2422 }
2423 if (!list_empty(&pci_pme_list))
2424 queue_delayed_work(system_freezable_wq, &pci_pme_work,
2425 msecs_to_jiffies(PME_TIMEOUT));
2426 mutex_unlock(&pci_pme_list_mutex);
2427 }
2428
2429 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2430 {
2431 u16 pmcsr;
2432
2433 if (!dev->pme_support)
2434 return;
2435
2436 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2437 /* Clear PME_Status by writing 1 to it and enable PME# */
2438 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2439 if (!enable)
2440 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2441
2442 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2443 }
2444
2445 /**
2446 * pci_pme_restore - Restore PME configuration after config space restore.
2447 * @dev: PCI device to update.
2448 */
2449 void pci_pme_restore(struct pci_dev *dev)
2450 {
2451 u16 pmcsr;
2452
2453 if (!dev->pme_support)
2454 return;
2455
2456 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2457 if (dev->wakeup_prepared) {
2458 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2459 pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2460 } else {
2461 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2462 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2463 }
2464 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2465 }
2466
2467 /**
2468 * pci_pme_active - enable or disable PCI device's PME# function
2469 * @dev: PCI device to handle.
2470 * @enable: 'true' to enable PME# generation; 'false' to disable it.
2471 *
2472 * The caller must verify that the device is capable of generating PME# before
2473 * calling this function with @enable equal to 'true'.
2474 */
2475 void pci_pme_active(struct pci_dev *dev, bool enable)
2476 {
2477 __pci_pme_active(dev, enable);
2478
2479 /*
2480 * PCI (as opposed to PCIe) PME requires that the device have
2481 * its PME# line hooked up correctly. Not all hardware vendors
2482 * do this, so the PME never gets delivered and the device
2483 * remains asleep. The easiest way around this is to
2484 * periodically walk the list of suspended devices and check
2485 * whether any have their PME flag set. The assumption is that
2486 * we'll wake up often enough anyway that this won't be a huge
2487 * hit, and the power savings from the devices will still be a
2488 * win.
2489 *
2490 * Although PCIe uses in-band PME message instead of PME# line
2491 * to report PME, PME does not work for some PCIe devices in
2492 * reality. For example, there are devices that set their PME
2493 * status bits, but don't really bother to send a PME message;
2494 * there are PCI Express Root Ports that don't bother to
2495 * trigger interrupts when they receive PME messages from the
2496 * devices below. So PME poll is used for PCIe devices too.
2497 */
2498
2499 if (dev->pme_poll) {
2500 struct pci_pme_device *pme_dev;
2501 if (enable) {
2502 pme_dev = kmalloc(sizeof(struct pci_pme_device),
2503 GFP_KERNEL);
2504 if (!pme_dev) {
2505 pci_warn(dev, "can't enable PME#\n");
2506 return;
2507 }
2508 pme_dev->dev = dev;
2509 mutex_lock(&pci_pme_list_mutex);
2510 list_add(&pme_dev->list, &pci_pme_list);
2511 if (list_is_singular(&pci_pme_list))
2512 queue_delayed_work(system_freezable_wq,
2513 &pci_pme_work,
2514 msecs_to_jiffies(PME_TIMEOUT));
2515 mutex_unlock(&pci_pme_list_mutex);
2516 } else {
2517 mutex_lock(&pci_pme_list_mutex);
2518 list_for_each_entry(pme_dev, &pci_pme_list, list) {
2519 if (pme_dev->dev == dev) {
2520 list_del(&pme_dev->list);
2521 kfree(pme_dev);
2522 break;
2523 }
2524 }
2525 mutex_unlock(&pci_pme_list_mutex);
2526 }
2527 }
2528
2529 pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2530 }
2531 EXPORT_SYMBOL(pci_pme_active);
2532
2533 /**
2534 * __pci_enable_wake - enable PCI device as wakeup event source
2535 * @dev: PCI device affected
2536 * @state: PCI state from which device will issue wakeup events
2537 * @enable: True to enable event generation; false to disable
2538 *
2539 * This enables the device as a wakeup event source, or disables it.
2540 * When such events involve platform-specific hooks, those hooks are
2541 * called automatically by this routine.
2542 *
2543 * Devices with legacy power management (no standard PCI PM capabilities)
2544 * always require such platform hooks.
2545 *
2546 * RETURN VALUE:
2547 * 0 is returned on success
2548 * -EINVAL is returned if device is not supposed to wake up the system
2549 * Error code depending on the platform is returned if both the platform and
2550 * the native mechanism fail to enable the generation of wake-up events
2551 */
2552 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2553 {
2554 int ret = 0;
2555
2556 /*
2557 * Bridges that are not power-manageable directly only signal
2558 * wakeup on behalf of subordinate devices which is set up
2559 * elsewhere, so skip them. However, bridges that are
2560 * power-manageable may signal wakeup for themselves (for example,
2561 * on a hotplug event) and they need to be covered here.
2562 */
2563 if (!pci_power_manageable(dev))
2564 return 0;
2565
2566 /* Don't do the same thing twice in a row for one device. */
2567 if (!!enable == !!dev->wakeup_prepared)
2568 return 0;
2569
2570 /*
2571 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2572 * Anderson we should be doing PME# wake enable followed by ACPI wake
2573 * enable. To disable wake-up we call the platform first, for symmetry.
2574 */
2575
2576 if (enable) {
2577 int error;
2578
2579 /*
2580 * Enable PME signaling if the device can signal PME from
2581 * D3cold regardless of whether or not it can signal PME from
2582 * the current target state, because that will allow it to
2583 * signal PME when the hierarchy above it goes into D3cold and
2584 * the device itself ends up in D3cold as a result of that.
2585 */
2586 if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2587 pci_pme_active(dev, true);
2588 else
2589 ret = 1;
2590 error = platform_pci_set_wakeup(dev, true);
2591 if (ret)
2592 ret = error;
2593 if (!ret)
2594 dev->wakeup_prepared = true;
2595 } else {
2596 platform_pci_set_wakeup(dev, false);
2597 pci_pme_active(dev, false);
2598 dev->wakeup_prepared = false;
2599 }
2600
2601 return ret;
2602 }
2603
2604 /**
2605 * pci_enable_wake - change wakeup settings for a PCI device
2606 * @pci_dev: Target device
2607 * @state: PCI state from which device will issue wakeup events
2608 * @enable: Whether or not to enable event generation
2609 *
2610 * If @enable is set, check device_may_wakeup() for the device before calling
2611 * __pci_enable_wake() for it.
2612 */
2613 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2614 {
2615 if (enable && !device_may_wakeup(&pci_dev->dev))
2616 return -EINVAL;
2617
2618 return __pci_enable_wake(pci_dev, state, enable);
2619 }
2620 EXPORT_SYMBOL(pci_enable_wake);
2621
2622 /**
2623 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2624 * @dev: PCI device to prepare
2625 * @enable: True to enable wake-up event generation; false to disable
2626 *
2627 * Many drivers want the device to wake up the system from D3_hot or D3_cold
2628 * and this function allows them to set that up cleanly - pci_enable_wake()
2629 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2630 * ordering constraints.
2631 *
2632 * This function only returns an error code if the device is not allowed to wake
2633 * up the system from sleep or it is not capable of generating PME# from both
2634 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2635 */
2636 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2637 {
2638 return pci_pme_capable(dev, PCI_D3cold) ?
2639 pci_enable_wake(dev, PCI_D3cold, enable) :
2640 pci_enable_wake(dev, PCI_D3hot, enable);
2641 }
2642 EXPORT_SYMBOL(pci_wake_from_d3);
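/*
 * Illustrative sketch, not part of this file: a network driver honouring a
 * Wake-on-LAN setting in its suspend path. "wol_enabled" is a hypothetical
 * driver flag.
 *
 *	device_set_wakeup_enable(&pdev->dev, wol_enabled);
 *	pci_wake_from_d3(pdev, wol_enabled);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */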
2643
2644 /**
2645 * pci_target_state - find an appropriate low power state for a given PCI dev
2646 * @dev: PCI device
2647 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2648 *
2649 * Use underlying platform code to find a supported low power state for @dev.
2650 * If the platform can't manage @dev, return the deepest state from which it
2651 * can generate wake events, based on any available PME info.
2652 */
2653 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2654 {
2655 if (platform_pci_power_manageable(dev)) {
2656 /*
2657 * Call the platform to find the target state for the device.
2658 */
2659 pci_power_t state = platform_pci_choose_state(dev);
2660
2661 switch (state) {
2662 case PCI_POWER_ERROR:
2663 case PCI_UNKNOWN:
2664 return PCI_D3hot;
2665
2666 case PCI_D1:
2667 case PCI_D2:
2668 if (pci_no_d1d2(dev))
2669 return PCI_D3hot;
2670 }
2671
2672 return state;
2673 }
2674
2675 /*
2676 * If the device is in D3cold even though it's not power-manageable by
2677 * the platform, it may have been powered down by non-standard means.
2678 * Best to let it slumber.
2679 */
2680 if (dev->current_state == PCI_D3cold)
2681 return PCI_D3cold;
2682 else if (!dev->pm_cap)
2683 return PCI_D0;
2684
2685 if (wakeup && dev->pme_support) {
2686 pci_power_t state = PCI_D3hot;
2687
2688 /*
2689 * Find the deepest state from which the device can generate
2690 * PME#.
2691 */
2692 while (state && !(dev->pme_support & (1 << state)))
2693 state--;
2694
2695 if (state)
2696 return state;
2697 else if (dev->pme_support & 1)
2698 return PCI_D0;
2699 }
2700
2701 return PCI_D3hot;
2702 }
2703
2704 /**
2705 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2706 * into a sleep state
2707 * @dev: Device to handle.
2708 *
2709 * Choose the power state appropriate for the device depending on whether
2710 * it can wake up the system and/or is power manageable by the platform
2711 * (PCI_D3hot is the default) and put the device into that state.
2712 */
2713 int pci_prepare_to_sleep(struct pci_dev *dev)
2714 {
2715 bool wakeup = device_may_wakeup(&dev->dev);
2716 pci_power_t target_state = pci_target_state(dev, wakeup);
2717 int error;
2718
2719 if (target_state == PCI_POWER_ERROR)
2720 return -EIO;
2721
2722 pci_enable_wake(dev, target_state, wakeup);
2723
2724 error = pci_set_power_state(dev, target_state);
2725
2726 if (error)
2727 pci_enable_wake(dev, target_state, false);
2728
2729 return error;
2730 }
2731 EXPORT_SYMBOL(pci_prepare_to_sleep);
2732
2733 /**
2734 * pci_back_from_sleep - turn PCI device on during system-wide transition
2735 * into working state
2736 * @dev: Device to handle.
2737 *
2738 * Disable device's system wake-up capability and put it into D0.
2739 */
2740 int pci_back_from_sleep(struct pci_dev *dev)
2741 {
2742 int ret = pci_set_power_state(dev, PCI_D0);
2743
2744 if (ret)
2745 return ret;
2746
2747 pci_enable_wake(dev, PCI_D0, false);
2748 return 0;
2749 }
2750 EXPORT_SYMBOL(pci_back_from_sleep);
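/*
 * Illustrative sketch, not part of this file: the two helpers above are
 * meant to be used as a pair by whoever handles the system-wide transition;
 * pci_back_from_sleep() undoes what pci_prepare_to_sleep() set up.
 *
 *	error = pci_prepare_to_sleep(pdev);	(pick state, arm wakeup, enter it)
 *	...
 *	error = pci_back_from_sleep(pdev);	(back to D0, wakeup disarmed)
 */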
2751
2752 /**
2753 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2754 * @dev: PCI device being suspended.
2755 *
2756 * Prepare @dev to generate wake-up events at run time and put it into a low
2757 * power state.
2758 */
2759 int pci_finish_runtime_suspend(struct pci_dev *dev)
2760 {
2761 pci_power_t target_state;
2762 int error;
2763
2764 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2765 if (target_state == PCI_POWER_ERROR)
2766 return -EIO;
2767
2768 __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2769
2770 error = pci_set_power_state(dev, target_state);
2771
2772 if (error)
2773 pci_enable_wake(dev, target_state, false);
2774
2775 return error;
2776 }
2777
2778 /**
2779 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2780 * @dev: Device to check.
2781 *
2782 * Return true if the device itself is capable of generating wake-up events
2783 * (through the platform or using the native PCIe PME) or if the device supports
2784 * PME and one of its upstream bridges can generate wake-up events.
2785 */
2786 bool pci_dev_run_wake(struct pci_dev *dev)
2787 {
2788 struct pci_bus *bus = dev->bus;
2789
2790 if (!dev->pme_support)
2791 return false;
2792
2793 /* PME-capable in principle, but not from the target power state */
2794 if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2795 return false;
2796
2797 if (device_can_wakeup(&dev->dev))
2798 return true;
2799
2800 while (bus->parent) {
2801 struct pci_dev *bridge = bus->self;
2802
2803 if (device_can_wakeup(&bridge->dev))
2804 return true;
2805
2806 bus = bus->parent;
2807 }
2808
2809 /* We have reached the root bus. */
2810 if (bus->bridge)
2811 return device_can_wakeup(bus->bridge);
2812
2813 return false;
2814 }
2815 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2816
2817 /**
2818 * pci_dev_need_resume - Check if it is necessary to resume the device.
2819 * @pci_dev: Device to check.
2820 *
2821 * Return 'true' if the device is not runtime-suspended or it has to be
2822 * reconfigured due to wakeup settings difference between system and runtime
2823 * suspend, or the current power state of it is not suitable for the upcoming
2824 * (system-wide) transition.
2825 */
2826 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2827 {
2828 struct device *dev = &pci_dev->dev;
2829 pci_power_t target_state;
2830
2831 if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2832 return true;
2833
2834 target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2835
2836 /*
2837 * If the earlier platform check has not triggered, D3cold is just power
2838 * removal on top of D3hot, so no need to resume the device in that
2839 * case.
2840 */
2841 return target_state != pci_dev->current_state &&
2842 target_state != PCI_D3cold &&
2843 pci_dev->current_state != PCI_D3hot;
2844 }
2845
2846 /**
2847 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2848 * @pci_dev: Device to check.
2849 *
2850 * If the device is suspended and it is not configured for system wakeup,
2851 * disable PME for it to prevent it from waking up the system unnecessarily.
2852 *
2853 * Note that if the device's power state is D3cold and the platform check in
2854 * pci_dev_need_resume() has not triggered, the device's configuration need not
2855 * be changed.
2856 */
2857 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2858 {
2859 struct device *dev = &pci_dev->dev;
2860
2861 spin_lock_irq(&dev->power.lock);
2862
2863 if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2864 pci_dev->current_state < PCI_D3cold)
2865 __pci_pme_active(pci_dev, false);
2866
2867 spin_unlock_irq(&dev->power.lock);
2868 }
2869
2870 /**
2871 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2872 * @pci_dev: Device to handle.
2873 *
2874 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2875 * it might have been disabled during the prepare phase of system suspend if
2876 * the device was not configured for system wakeup.
2877 */
2878 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2879 {
2880 struct device *dev = &pci_dev->dev;
2881
2882 if (!pci_dev_run_wake(pci_dev))
2883 return;
2884
2885 spin_lock_irq(&dev->power.lock);
2886
2887 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2888 __pci_pme_active(pci_dev, true);
2889
2890 spin_unlock_irq(&dev->power.lock);
2891 }
2892
2893 /**
2894 * pci_choose_state - Choose the power state of a PCI device.
2895 * @dev: Target PCI device.
2896 * @state: Target state for the whole system.
2897 *
2898 * Returns PCI power state suitable for @dev and @state.
2899 */
2900 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
2901 {
2902 if (state.event == PM_EVENT_ON)
2903 return PCI_D0;
2904
2905 return pci_target_state(dev, false);
2906 }
2907 EXPORT_SYMBOL(pci_choose_state);
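/*
 * Illustrative sketch, not part of this file: legacy-style (non-dev_pm_ops)
 * suspend callbacks traditionally translated the system pm_message_t into a
 * device power state with pci_choose_state(). The mydrv_suspend() below is
 * hypothetical and shown purely for the calling convention.
 *
 *	static int mydrv_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 */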
2908
2909 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2910 {
2911 struct device *dev = &pdev->dev;
2912 struct device *parent = dev->parent;
2913
2914 if (parent)
2915 pm_runtime_get_sync(parent);
2916 pm_runtime_get_noresume(dev);
2917 /*
2918 * pdev->current_state is set to PCI_D3cold during suspending,
2919 * so wait until suspending completes
2920 */
2921 pm_runtime_barrier(dev);
2922 /*
2923 * Only need to resume devices in D3cold, because config
2924 * registers are still accessible for devices suspended but
2925 * not in D3cold.
2926 */
2927 if (pdev->current_state == PCI_D3cold)
2928 pm_runtime_resume(dev);
2929 }
2930
2931 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2932 {
2933 struct device *dev = &pdev->dev;
2934 struct device *parent = dev->parent;
2935
2936 pm_runtime_put(dev);
2937 if (parent)
2938 pm_runtime_put_sync(parent);
2939 }
2940
2941 static const struct dmi_system_id bridge_d3_blacklist[] = {
2942 #ifdef CONFIG_X86
2943 {
2944 /*
2945 * Gigabyte X299 root port is not marked as hotplug capable
2946 * which allows Linux to power manage it. However, this
2947 * confuses the BIOS SMI handler so don't power manage root
2948 * ports on that system.
2949 */
2950 .ident = "X299 DESIGNARE EX-CF",
2951 .matches = {
2952 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2953 DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2954 },
2955 },
2956 {
2957 /*
2958 * Downstream device is not accessible after putting a root port
2959 * into D3cold and back into D0 on Elo Continental Z2 board
2960 */
2961 .ident = "Elo Continental Z2",
2962 .matches = {
2963 DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
2964 DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
2965 DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
2966 },
2967 },
2968 #endif
2969 { }
2970 };
2971
2972 /**
2973 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2974 * @bridge: Bridge to check
2975 *
2976 * This function checks if it is possible to move the bridge to D3.
2977 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
2978 */
2979 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2980 {
2981 if (!pci_is_pcie(bridge))
2982 return false;
2983
2984 switch (pci_pcie_type(bridge)) {
2985 case PCI_EXP_TYPE_ROOT_PORT:
2986 case PCI_EXP_TYPE_UPSTREAM:
2987 case PCI_EXP_TYPE_DOWNSTREAM:
2988 if (pci_bridge_d3_disable)
2989 return false;
2990
2991 /*
2992 * Hotplug ports handled by firmware in System Management Mode
2993 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2994 */
2995 if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
2996 return false;
2997
2998 if (pci_bridge_d3_force)
2999 return true;
3000
3001 /* Even the oldest 2010 Thunderbolt controller supports D3. */
3002 if (bridge->is_thunderbolt)
3003 return true;
3004
3005 /* Platform might know better if the bridge supports D3 */
3006 if (platform_pci_bridge_d3(bridge))
3007 return true;
3008
3009 /*
3010 * Hotplug ports handled natively by the OS were not validated
3011 * by vendors for runtime D3 at least until 2018 because there
3012 * was no OS support.
3013 */
3014 if (bridge->is_hotplug_bridge)
3015 return false;
3016
3017 if (dmi_check_system(bridge_d3_blacklist))
3018 return false;
3019
3020 /*
3021 * It should be safe to put PCIe ports from 2015 or newer
3022 * to D3.
3023 */
3024 if (dmi_get_bios_year() >= 2015)
3025 return true;
3026 break;
3027 }
3028
3029 return false;
3030 }
3031
3032 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
3033 {
3034 bool *d3cold_ok = data;
3035
3036 if (/* The device needs to be allowed to go D3cold ... */
3037 dev->no_d3cold || !dev->d3cold_allowed ||
3038
3039 /* ... and if it is wakeup capable to do so from D3cold. */
3040 (device_may_wakeup(&dev->dev) &&
3041 !pci_pme_capable(dev, PCI_D3cold)) ||
3042
3043 /* If it is a bridge it must be allowed to go to D3. */
3044 !pci_power_manageable(dev))
3045
3046 *d3cold_ok = false;
3047
3048 return !*d3cold_ok;
3049 }
3050
3051 /*
3052 * pci_bridge_d3_update - Update bridge D3 capabilities
3053 * @dev: PCI device which is changed
3054 *
3055 * Update upstream bridge PM capabilities accordingly depending on if the
3056 * device PM configuration was changed or the device is being removed. The
3057 * change is also propagated upstream.
3058 */
3059 void pci_bridge_d3_update(struct pci_dev *dev)
3060 {
3061 bool remove = !device_is_registered(&dev->dev);
3062 struct pci_dev *bridge;
3063 bool d3cold_ok = true;
3064
3065 bridge = pci_upstream_bridge(dev);
3066 if (!bridge || !pci_bridge_d3_possible(bridge))
3067 return;
3068
3069 /*
3070 * If D3 is currently allowed for the bridge, removing one of its
3071 * children won't change that.
3072 */
3073 if (remove && bridge->bridge_d3)
3074 return;
3075
3076 /*
3077 * If D3 is currently allowed for the bridge and a child is added or
3078 * changed, disallowance of D3 can only be caused by that child, so
3079 * we only need to check that single device, not any of its siblings.
3080 *
3081 * If D3 is currently not allowed for the bridge, checking the device
3082 * first may allow us to skip checking its siblings.
3083 */
3084 if (!remove)
3085 pci_dev_check_d3cold(dev, &d3cold_ok);
3086
3087 /*
3088 * If D3 is currently not allowed for the bridge, this may be caused
3089 * either by the device being changed/removed or any of its siblings,
3090 * so we need to go through all children to find out if one of them
3091 * continues to block D3.
3092 */
3093 if (d3cold_ok && !bridge->bridge_d3)
3094 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3095 &d3cold_ok);
3096
3097 if (bridge->bridge_d3 != d3cold_ok) {
3098 bridge->bridge_d3 = d3cold_ok;
3099 /* Propagate change to upstream bridges */
3100 pci_bridge_d3_update(bridge);
3101 }
3102 }
3103
3104 /**
3105 * pci_d3cold_enable - Enable D3cold for device
3106 * @dev: PCI device to handle
3107 *
3108 * This function can be used in drivers to enable D3cold from the device
3109 * they handle. It also updates upstream PCI bridge PM capabilities
3110 * accordingly.
3111 */
3112 void pci_d3cold_enable(struct pci_dev *dev)
3113 {
3114 if (dev->no_d3cold) {
3115 dev->no_d3cold = false;
3116 pci_bridge_d3_update(dev);
3117 }
3118 }
3119 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3120
3121 /**
3122 * pci_d3cold_disable - Disable D3cold for device
3123 * @dev: PCI device to handle
3124 *
3125 * This function can be used in drivers to disable D3cold from the device
3126 * they handle. It also updates upstream PCI bridge PM capabilities
3127 * accordingly.
3128 */
3129 void pci_d3cold_disable(struct pci_dev *dev)
3130 {
3131 if (!dev->no_d3cold) {
3132 dev->no_d3cold = true;
3133 pci_bridge_d3_update(dev);
3134 }
3135 }
3136 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
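/*
 * Illustrative sketch, not part of this file: a driver that cannot tolerate
 * the exit latency of D3cold during some operation might bracket that
 * operation as below; mydrv_latency_critical_work() is hypothetical.
 *
 *	pci_d3cold_disable(pdev);
 *	mydrv_latency_critical_work(pdev);
 *	pci_d3cold_enable(pdev);
 */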
3137
3138 /**
3139 * pci_pm_init - Initialize PM functions of given PCI device
3140 * @dev: PCI device to handle.
3141 */
3142 void pci_pm_init(struct pci_dev *dev)
3143 {
3144 int pm;
3145 u16 status;
3146 u16 pmc;
3147
3148 pm_runtime_forbid(&dev->dev);
3149 pm_runtime_set_active(&dev->dev);
3150 pm_runtime_enable(&dev->dev);
3151 device_enable_async_suspend(&dev->dev);
3152 dev->wakeup_prepared = false;
3153
3154 dev->pm_cap = 0;
3155 dev->pme_support = 0;
3156
3157 /* find PCI PM capability in list */
3158 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3159 if (!pm)
3160 return;
3161 /* Check device's ability to generate PME# */
3162 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3163
3164 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3165 pci_err(dev, "unsupported PM cap regs version (%u)\n",
3166 pmc & PCI_PM_CAP_VER_MASK);
3167 return;
3168 }
3169
3170 dev->pm_cap = pm;
3171 dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3172 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3173 dev->bridge_d3 = pci_bridge_d3_possible(dev);
3174 dev->d3cold_allowed = true;
3175
3176 dev->d1_support = false;
3177 dev->d2_support = false;
3178 if (!pci_no_d1d2(dev)) {
3179 if (pmc & PCI_PM_CAP_D1)
3180 dev->d1_support = true;
3181 if (pmc & PCI_PM_CAP_D2)
3182 dev->d2_support = true;
3183
3184 if (dev->d1_support || dev->d2_support)
3185 pci_info(dev, "supports%s%s\n",
3186 dev->d1_support ? " D1" : "",
3187 dev->d2_support ? " D2" : "");
3188 }
3189
3190 pmc &= PCI_PM_CAP_PME_MASK;
3191 if (pmc) {
3192 pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3193 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3194 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3195 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3196 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3197 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3198 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
3199 dev->pme_poll = true;
3200 /*
3201 * Make device's PM flags reflect the wake-up capability, but
3202 * let user space enable it to wake up the system as needed.
3203 */
3204 device_set_wakeup_capable(&dev->dev, true);
3205 /* Disable the PME# generation functionality */
3206 pci_pme_active(dev, false);
3207 }
3208
3209 pci_read_config_word(dev, PCI_STATUS, &status);
3210 if (status & PCI_STATUS_IMM_READY)
3211 dev->imm_ready = 1;
3212 }
3213
3214 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3215 {
3216 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3217
3218 switch (prop) {
3219 case PCI_EA_P_MEM:
3220 case PCI_EA_P_VF_MEM:
3221 flags |= IORESOURCE_MEM;
3222 break;
3223 case PCI_EA_P_MEM_PREFETCH:
3224 case PCI_EA_P_VF_MEM_PREFETCH:
3225 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3226 break;
3227 case PCI_EA_P_IO:
3228 flags |= IORESOURCE_IO;
3229 break;
3230 default:
3231 return 0;
3232 }
3233
3234 return flags;
3235 }
3236
3237 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3238 u8 prop)
3239 {
3240 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3241 return &dev->resource[bei];
3242 #ifdef CONFIG_PCI_IOV
3243 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3244 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3245 return &dev->resource[PCI_IOV_RESOURCES +
3246 bei - PCI_EA_BEI_VF_BAR0];
3247 #endif
3248 else if (bei == PCI_EA_BEI_ROM)
3249 return &dev->resource[PCI_ROM_RESOURCE];
3250 else
3251 return NULL;
3252 }
3253
3254 /* Read an Enhanced Allocation (EA) entry */
3255 static int pci_ea_read(struct pci_dev *dev, int offset)
3256 {
3257 struct resource *res;
3258 int ent_size, ent_offset = offset;
3259 resource_size_t start, end;
3260 unsigned long flags;
3261 u32 dw0, bei, base, max_offset;
3262 u8 prop;
3263 bool support_64 = (sizeof(resource_size_t) >= 8);
3264
3265 pci_read_config_dword(dev, ent_offset, &dw0);
3266 ent_offset += 4;
3267
3268 /* Entry size field indicates DWORDs after 1st */
3269 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3270
3271 if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3272 goto out;
3273
3274 bei = (dw0 & PCI_EA_BEI) >> 4;
3275 prop = (dw0 & PCI_EA_PP) >> 8;
3276
3277 /*
3278 * If the Property is in the reserved range, try the Secondary
3279 * Property instead.
3280 */
3281 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3282 prop = (dw0 & PCI_EA_SP) >> 16;
3283 if (prop > PCI_EA_P_BRIDGE_IO)
3284 goto out;
3285
3286 res = pci_ea_get_resource(dev, bei, prop);
3287 if (!res) {
3288 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3289 goto out;
3290 }
3291
3292 flags = pci_ea_flags(dev, prop);
3293 if (!flags) {
3294 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3295 goto out;
3296 }
3297
3298 /* Read Base */
3299 pci_read_config_dword(dev, ent_offset, &base);
3300 start = (base & PCI_EA_FIELD_MASK);
3301 ent_offset += 4;
3302
3303 /* Read MaxOffset */
3304 pci_read_config_dword(dev, ent_offset, &max_offset);
3305 ent_offset += 4;
3306
3307 /* Read Base MSBs (if 64-bit entry) */
3308 if (base & PCI_EA_IS_64) {
3309 u32 base_upper;
3310
3311 pci_read_config_dword(dev, ent_offset, &base_upper);
3312 ent_offset += 4;
3313
3314 flags |= IORESOURCE_MEM_64;
3315
3316 /* entry starts above 32-bit boundary, can't use */
3317 if (!support_64 && base_upper)
3318 goto out;
3319
3320 if (support_64)
3321 start |= ((u64)base_upper << 32);
3322 }
3323
3324 end = start + (max_offset | 0x03);
3325
3326 /* Read MaxOffset MSBs (if 64-bit entry) */
3327 if (max_offset & PCI_EA_IS_64) {
3328 u32 max_offset_upper;
3329
3330 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3331 ent_offset += 4;
3332
3333 flags |= IORESOURCE_MEM_64;
3334
3335 /* entry too big, can't use */
3336 if (!support_64 && max_offset_upper)
3337 goto out;
3338
3339 if (support_64)
3340 end += ((u64)max_offset_upper << 32);
3341 }
3342
3343 if (end < start) {
3344 pci_err(dev, "EA Entry crosses address boundary\n");
3345 goto out;
3346 }
3347
3348 if (ent_size != ent_offset - offset) {
3349 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3350 ent_size, ent_offset - offset);
3351 goto out;
3352 }
3353
3354 res->name = pci_name(dev);
3355 res->start = start;
3356 res->end = end;
3357 res->flags = flags;
3358
3359 if (bei <= PCI_EA_BEI_BAR5)
3360 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3361 bei, res, prop);
3362 else if (bei == PCI_EA_BEI_ROM)
3363 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3364 res, prop);
3365 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3366 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3367 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3368 else
3369 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3370 bei, res, prop);
3371
3372 out:
3373 return offset + ent_size;
3374 }
3375
3376 /* Enhanced Allocation Initialization */
3377 void pci_ea_init(struct pci_dev *dev)
3378 {
3379 int ea;
3380 u8 num_ent;
3381 int offset;
3382 int i;
3383
3384 /* find PCI EA capability in list */
3385 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3386 if (!ea)
3387 return;
3388
3389 /* determine the number of entries */
3390 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3391 &num_ent);
3392 num_ent &= PCI_EA_NUM_ENT_MASK;
3393
3394 offset = ea + PCI_EA_FIRST_ENT;
3395
3396 /* Skip DWORD 2 for type 1 functions */
3397 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3398 offset += 4;
3399
3400 /* parse each EA entry */
3401 for (i = 0; i < num_ent; ++i)
3402 offset = pci_ea_read(dev, offset);
3403 }
3404
3405 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3406 struct pci_cap_saved_state *new_cap)
3407 {
3408 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3409 }
3410
3411 /**
3412 * _pci_add_cap_save_buffer - allocate buffer for saving given
3413 * capability registers
3414 * @dev: the PCI device
3415 * @cap: the capability to allocate the buffer for
3416 * @extended: Standard or Extended capability ID
3417 * @size: requested size of the buffer
3418 */
3419 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3420 bool extended, unsigned int size)
3421 {
3422 int pos;
3423 struct pci_cap_saved_state *save_state;
3424
3425 if (extended)
3426 pos = pci_find_ext_capability(dev, cap);
3427 else
3428 pos = pci_find_capability(dev, cap);
3429
3430 if (!pos)
3431 return 0;
3432
3433 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3434 if (!save_state)
3435 return -ENOMEM;
3436
3437 save_state->cap.cap_nr = cap;
3438 save_state->cap.cap_extended = extended;
3439 save_state->cap.size = size;
3440 pci_add_saved_cap(dev, save_state);
3441
3442 return 0;
3443 }
3444
3445 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3446 {
3447 return _pci_add_cap_save_buffer(dev, cap, false, size);
3448 }
3449
3450 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3451 {
3452 return _pci_add_cap_save_buffer(dev, cap, true, size);
3453 }
3454
3455 /**
3456 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3457 * @dev: the PCI device
3458 */
3459 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3460 {
3461 int error;
3462
3463 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3464 PCI_EXP_SAVE_REGS * sizeof(u16));
3465 if (error)
3466 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3467
3468 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3469 if (error)
3470 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3471
3472 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3473 2 * sizeof(u16));
3474 if (error)
3475 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3476
3477 pci_allocate_vc_save_buffers(dev);
3478 }
3479
3480 void pci_free_cap_save_buffers(struct pci_dev *dev)
3481 {
3482 struct pci_cap_saved_state *tmp;
3483 struct hlist_node *n;
3484
3485 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3486 kfree(tmp);
3487 }
3488
3489 /**
3490 * pci_configure_ari - enable or disable ARI forwarding
3491 * @dev: the PCI device
3492 *
3493 * If @dev and its upstream bridge both support ARI, enable ARI in the
3494 * bridge. Otherwise, disable ARI in the bridge.
3495 */
3496 void pci_configure_ari(struct pci_dev *dev)
3497 {
3498 u32 cap;
3499 struct pci_dev *bridge;
3500
3501 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3502 return;
3503
3504 bridge = dev->bus->self;
3505 if (!bridge)
3506 return;
3507
3508 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3509 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3510 return;
3511
3512 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3513 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3514 PCI_EXP_DEVCTL2_ARI);
3515 bridge->ari_enabled = 1;
3516 } else {
3517 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3518 PCI_EXP_DEVCTL2_ARI);
3519 bridge->ari_enabled = 0;
3520 }
3521 }
3522
3523 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3524 {
3525 int pos;
3526 u16 cap, ctrl;
3527
3528 pos = pdev->acs_cap;
3529 if (!pos)
3530 return false;
3531
3532 /*
3533 * Except for egress control, capabilities are either required
3534 * or only required if controllable. Features missing from the
3535 * capability field can therefore be assumed as hard-wired enabled.
3536 */
3537 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3538 acs_flags &= (cap | PCI_ACS_EC);
3539
3540 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3541 return (ctrl & acs_flags) == acs_flags;
3542 }
3543
3544 /**
3545 * pci_acs_enabled - test ACS against required flags for a given device
3546 * @pdev: device to test
3547 * @acs_flags: required PCI ACS flags
3548 *
3549 * Return true if the device supports the provided flags. Automatically
3550 * filters out flags that are not implemented on multifunction devices.
3551 *
3552 * Note that this interface checks the effective ACS capabilities of the
3553 * device rather than the actual capabilities. For instance, most single
3554 * function endpoints are not required to support ACS because they have no
3555 * opportunity for peer-to-peer access. We therefore return 'true'
3556 * regardless of whether the device exposes an ACS capability. This makes
3557 * it much easier for callers of this function to ignore the actual type
3558 * or topology of the device when testing ACS support.
3559 */
3560 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3561 {
3562 int ret;
3563
3564 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3565 if (ret >= 0)
3566 return ret > 0;
3567
3568 /*
3569 * Conventional PCI and PCI-X devices never support ACS, either
3570 * effectively or actually. The shared bus topology implies that
3571 * any device on the bus can receive or snoop DMA.
3572 */
3573 if (!pci_is_pcie(pdev))
3574 return false;
3575
3576 switch (pci_pcie_type(pdev)) {
3577 /*
3578 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3579 * but since their primary interface is PCI/X, we conservatively
3580 * handle them as we would a non-PCIe device.
3581 */
3582 case PCI_EXP_TYPE_PCIE_BRIDGE:
3583 /*
3584 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
3585 * applicable... must never implement an ACS Extended Capability...".
3586 * This seems arbitrary, but we take a conservative interpretation
3587 * of this statement.
3588 */
3589 case PCI_EXP_TYPE_PCI_BRIDGE:
3590 case PCI_EXP_TYPE_RC_EC:
3591 return false;
3592 /*
3593 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3594 * implement ACS in order to indicate their peer-to-peer capabilities,
3595 * regardless of whether they are single- or multi-function devices.
3596 */
3597 case PCI_EXP_TYPE_DOWNSTREAM:
3598 case PCI_EXP_TYPE_ROOT_PORT:
3599 return pci_acs_flags_enabled(pdev, acs_flags);
3600 /*
3601 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3602 * implemented by the remaining PCIe types to indicate peer-to-peer
3603 * capabilities, but only when they are part of a multifunction
3604 * device. The footnote for section 6.12 indicates the specific
3605 * PCIe types included here.
3606 */
3607 case PCI_EXP_TYPE_ENDPOINT:
3608 case PCI_EXP_TYPE_UPSTREAM:
3609 case PCI_EXP_TYPE_LEG_END:
3610 case PCI_EXP_TYPE_RC_END:
3611 if (!pdev->multifunction)
3612 break;
3613
3614 return pci_acs_flags_enabled(pdev, acs_flags);
3615 }
3616
3617 /*
3618 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3619 * to single function devices with the exception of downstream ports.
3620 */
3621 return true;
3622 }
3623
3624 /**
3625 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3626 * @start: starting downstream device
3627 * @end: ending upstream device or NULL to search to the root bus
3628 * @acs_flags: required flags
3629 *
3630 * Walk up a device tree from start to end testing PCI ACS support. If
3631 * any step along the way does not support the required flags, return false.
3632 */
3633 bool pci_acs_path_enabled(struct pci_dev *start,
3634 struct pci_dev *end, u16 acs_flags)
3635 {
3636 struct pci_dev *pdev, *parent = start;
3637
3638 do {
3639 pdev = parent;
3640
3641 if (!pci_acs_enabled(pdev, acs_flags))
3642 return false;
3643
3644 if (pci_is_root_bus(pdev->bus))
3645 return (end == NULL);
3646
3647 parent = pdev->bus->self;
3648 } while (pdev != end);
3649
3650 return true;
3651 }
3652
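/*
 * Usage sketch (illustrative only, not called from this file): a caller
 * such as an IOMMU-grouping or peer-to-peer DMA policy check could use
 * pci_acs_path_enabled() to verify that a device is isolated all the way
 * up to the root complex.  The flag set below is just an example:
 *
 *	u16 acs = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CF | PCI_ACS_UF;
 *
 *	if (!pci_acs_path_enabled(pdev, NULL, acs))
 *		pci_info(pdev, "ACS not enabled on upstream path\n");
 */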
3653 /**
3654 * pci_acs_init - Initialize ACS if hardware supports it
3655 * @dev: the PCI device
3656 */
3657 void pci_acs_init(struct pci_dev *dev)
3658 {
3659 dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3660
3661 /*
3662 * Attempt to enable ACS regardless of capability because some Root
3663 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3664 * the standard ACS capability but still support ACS via those
3665 * quirks.
3666 */
3667 pci_enable_acs(dev);
3668 }
3669
3670 /**
3671 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3672 * @pdev: PCI device
3673 * @bar: BAR to find
3674 *
3675 * Helper to find the position of the ctrl register for a BAR.
3676 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3677 * Returns -ENOENT if no ctrl register for the BAR could be found.
3678 */
3679 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3680 {
3681 unsigned int pos, nbars, i;
3682 u32 ctrl;
3683
3684 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3685 if (!pos)
3686 return -ENOTSUPP;
3687
3688 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3689 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3690 PCI_REBAR_CTRL_NBAR_SHIFT;
3691
3692 for (i = 0; i < nbars; i++, pos += 8) {
3693 int bar_idx;
3694
3695 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3696 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3697 if (bar_idx == bar)
3698 return pos;
3699 }
3700
3701 return -ENOENT;
3702 }
3703
3704 /**
3705 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3706 * @pdev: PCI device
3707 * @bar: BAR to query
3708 *
3709 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3710 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3711 */
3712 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3713 {
3714 int pos;
3715 u32 cap;
3716
3717 pos = pci_rebar_find_pos(pdev, bar);
3718 if (pos < 0)
3719 return 0;
3720
3721 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3722 cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
3723
3724 /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3725 if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3726 bar == 0 && cap == 0x700)
3727 return 0x3f00;
3728
3729 return cap;
3730 }
3731 EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
3732
3733 /**
3734 * pci_rebar_get_current_size - get the current size of a BAR
3735 * @pdev: PCI device
3736 * @bar: BAR to query
3737 *
3738 * Read the size of a BAR from the resizable BAR config.
3739 * Returns size if found or negative error code.
3740 */
3741 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3742 {
3743 int pos;
3744 u32 ctrl;
3745
3746 pos = pci_rebar_find_pos(pdev, bar);
3747 if (pos < 0)
3748 return pos;
3749
3750 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3751 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3752 }
3753
3754 /**
3755 * pci_rebar_set_size - set a new size for a BAR
3756 * @pdev: PCI device
3757 * @bar: BAR to set size to
3758 * @size: new size as defined in the spec (0=1MB, 19=512GB)
3759 *
3760 * Set the new size of a BAR as defined in the spec.
3761 * Returns zero if resizing was successful, error code otherwise.
3762 */
3763 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3764 {
3765 int pos;
3766 u32 ctrl;
3767
3768 pos = pci_rebar_find_pos(pdev, bar);
3769 if (pos < 0)
3770 return pos;
3771
3772 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3773 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3774 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3775 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3776 return 0;
3777 }
3778
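/*
 * Usage sketch (illustrative): a driver wanting the largest supported
 * size for BAR 0 could combine the two helpers above roughly like this.
 * Note that a real driver normally goes through pci_resize_resource(),
 * which also releases and reassigns the BAR resource:
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);
 *
 *	if (sizes) {
 *		int largest = __fls(sizes);	(bit n means 2^n MB)
 *
 *		pci_rebar_set_size(pdev, 0, largest);
 *	}
 */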
3779 /**
3780 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3781 * @dev: the PCI device
3782 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3783 * PCI_EXP_DEVCAP2_ATOMIC_COMP32
3784 * PCI_EXP_DEVCAP2_ATOMIC_COMP64
3785 * PCI_EXP_DEVCAP2_ATOMIC_COMP128
3786 *
3787 * Return 0 if all upstream bridges support AtomicOp routing, egress
3788 * blocking is disabled on all upstream ports, and the root port supports
3789 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3790 * AtomicOp completion), or negative otherwise.
3791 */
3792 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3793 {
3794 struct pci_bus *bus = dev->bus;
3795 struct pci_dev *bridge;
3796 u32 cap, ctl2;
3797
3798 /*
3799 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
3800 * in Device Control 2 is reserved in VFs and the PF value applies
3801 * to all associated VFs.
3802 */
3803 if (dev->is_virtfn)
3804 return -EINVAL;
3805
3806 if (!pci_is_pcie(dev))
3807 return -EINVAL;
3808
3809 /*
3810 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3811 * AtomicOp requesters. For now, we only support endpoints as
3812 * requesters and root ports as completers. No endpoints as
3813 * completers, and no peer-to-peer.
3814 */
3815
3816 switch (pci_pcie_type(dev)) {
3817 case PCI_EXP_TYPE_ENDPOINT:
3818 case PCI_EXP_TYPE_LEG_END:
3819 case PCI_EXP_TYPE_RC_END:
3820 break;
3821 default:
3822 return -EINVAL;
3823 }
3824
3825 while (bus->parent) {
3826 bridge = bus->self;
3827
3828 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3829
3830 switch (pci_pcie_type(bridge)) {
3831 /* Ensure switch ports support AtomicOp routing */
3832 case PCI_EXP_TYPE_UPSTREAM:
3833 case PCI_EXP_TYPE_DOWNSTREAM:
3834 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3835 return -EINVAL;
3836 break;
3837
3838 /* Ensure root port supports all the sizes we care about */
3839 case PCI_EXP_TYPE_ROOT_PORT:
3840 if ((cap & cap_mask) != cap_mask)
3841 return -EINVAL;
3842 break;
3843 }
3844
3845 /* Ensure upstream ports don't block AtomicOps on egress */
3846 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3847 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3848 &ctl2);
3849 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3850 return -EINVAL;
3851 }
3852
3853 bus = bus->parent;
3854 }
3855
3856 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3857 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3858 return 0;
3859 }
3860 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3861
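/*
 * Usage sketch (illustrative): an endpoint driver that wants to issue
 * 64-bit AtomicOp requests to host memory might call this from probe()
 * and fall back when the path to the Root Port does not support it:
 *
 *	if (pci_enable_atomic_ops_to_root(pdev,
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		dev_warn(&pdev->dev, "AtomicOps to root port not available\n");
 */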
3862 /**
3863 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3864 * @dev: the PCI device
3865 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3866 *
3867 * Perform INTx swizzling for a device behind one level of bridge. This is
3868 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3869 * behind bridges on add-in cards. For devices with ARI enabled, the slot
3870 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3871 * the PCI Express Base Specification, Revision 2.1)
3872 */
3873 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3874 {
3875 int slot;
3876
3877 if (pci_ari_enabled(dev->bus))
3878 slot = 0;
3879 else
3880 slot = PCI_SLOT(dev->devfn);
3881
3882 return (((pin - 1) + slot) % 4) + 1;
3883 }
3884
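/*
 * Worked example for the swizzle above: a device in slot 2 behind one
 * bridge that asserts INTB (pin 2) appears on the bridge's INTD, because
 * (((2 - 1) + 2) % 4) + 1 = 4.
 */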
3885 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3886 {
3887 u8 pin;
3888
3889 pin = dev->pin;
3890 if (!pin)
3891 return -1;
3892
3893 while (!pci_is_root_bus(dev->bus)) {
3894 pin = pci_swizzle_interrupt_pin(dev, pin);
3895 dev = dev->bus->self;
3896 }
3897 *bridge = dev;
3898 return pin;
3899 }
3900
3901 /**
3902 * pci_common_swizzle - swizzle INTx all the way to root bridge
3903 * @dev: the PCI device
3904 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3905 *
3906 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
3907 * bridges all the way up to a PCI root bus.
3908 */
3909 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3910 {
3911 u8 pin = *pinp;
3912
3913 while (!pci_is_root_bus(dev->bus)) {
3914 pin = pci_swizzle_interrupt_pin(dev, pin);
3915 dev = dev->bus->self;
3916 }
3917 *pinp = pin;
3918 return PCI_SLOT(dev->devfn);
3919 }
3920 EXPORT_SYMBOL_GPL(pci_common_swizzle);
3921
3922 /**
3923 * pci_release_region - Release a PCI bar
3924 * @pdev: PCI device whose resources were previously reserved by
3925 * pci_request_region()
3926 * @bar: BAR to release
3927 *
3928 * Releases the PCI I/O and memory resources previously reserved by a
3929 * successful call to pci_request_region(). Call this function only
3930 * after all use of the PCI regions has ceased.
3931 */
3932 void pci_release_region(struct pci_dev *pdev, int bar)
3933 {
3934 struct pci_devres *dr;
3935
3936 if (pci_resource_len(pdev, bar) == 0)
3937 return;
3938 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3939 release_region(pci_resource_start(pdev, bar),
3940 pci_resource_len(pdev, bar));
3941 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3942 release_mem_region(pci_resource_start(pdev, bar),
3943 pci_resource_len(pdev, bar));
3944
3945 dr = find_pci_dr(pdev);
3946 if (dr)
3947 dr->region_mask &= ~(1 << bar);
3948 }
3949 EXPORT_SYMBOL(pci_release_region);
3950
3951 /**
3952 * __pci_request_region - Reserve PCI I/O and memory resource
3953 * @pdev: PCI device whose resources are to be reserved
3954 * @bar: BAR to be reserved
3955 * @res_name: Name to be associated with resource.
3956 * @exclusive: whether the region access is exclusive or not
3957 *
3958 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3959 * being reserved by owner @res_name. Do not access any
3960 * address inside the PCI regions unless this call returns
3961 * successfully.
3962 *
3963 * If @exclusive is set, then the region is marked so that userspace
3964 * is explicitly not allowed to map the resource via /dev/mem or
3965 * sysfs MMIO access.
3966 *
3967 * Returns 0 on success, or %EBUSY on error. A warning
3968 * message is also printed on failure.
3969 */
3970 static int __pci_request_region(struct pci_dev *pdev, int bar,
3971 const char *res_name, int exclusive)
3972 {
3973 struct pci_devres *dr;
3974
3975 if (pci_resource_len(pdev, bar) == 0)
3976 return 0;
3977
3978 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3979 if (!request_region(pci_resource_start(pdev, bar),
3980 pci_resource_len(pdev, bar), res_name))
3981 goto err_out;
3982 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3983 if (!__request_mem_region(pci_resource_start(pdev, bar),
3984 pci_resource_len(pdev, bar), res_name,
3985 exclusive))
3986 goto err_out;
3987 }
3988
3989 dr = find_pci_dr(pdev);
3990 if (dr)
3991 dr->region_mask |= 1 << bar;
3992
3993 return 0;
3994
3995 err_out:
3996 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3997 &pdev->resource[bar]);
3998 return -EBUSY;
3999 }
4000
4001 /**
4002 * pci_request_region - Reserve PCI I/O and memory resource
4003 * @pdev: PCI device whose resources are to be reserved
4004 * @bar: BAR to be reserved
4005 * @res_name: Name to be associated with resource
4006 *
4007 * Mark the PCI region associated with PCI device @pdev BAR @bar as
4008 * being reserved by owner @res_name. Do not access any
4009 * address inside the PCI regions unless this call returns
4010 * successfully.
4011 *
4012 * Returns 0 on success, or %EBUSY on error. A warning
4013 * message is also printed on failure.
4014 */
4015 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
4016 {
4017 return __pci_request_region(pdev, bar, res_name, 0);
4018 }
4019 EXPORT_SYMBOL(pci_request_region);
4020
4021 /**
4022 * pci_release_selected_regions - Release selected PCI I/O and memory resources
4023 * @pdev: PCI device whose resources were previously reserved
4024 * @bars: Bitmask of BARs to be released
4025 *
4026 * Release selected PCI I/O and memory resources previously reserved.
4027 * Call this function only after all use of the PCI regions has ceased.
4028 */
4029 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
4030 {
4031 int i;
4032
4033 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4034 if (bars & (1 << i))
4035 pci_release_region(pdev, i);
4036 }
4037 EXPORT_SYMBOL(pci_release_selected_regions);
4038
4039 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
4040 const char *res_name, int excl)
4041 {
4042 int i;
4043
4044 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4045 if (bars & (1 << i))
4046 if (__pci_request_region(pdev, i, res_name, excl))
4047 goto err_out;
4048 return 0;
4049
4050 err_out:
4051 while (--i >= 0)
4052 if (bars & (1 << i))
4053 pci_release_region(pdev, i);
4054
4055 return -EBUSY;
4056 }
4057
4058
4059 /**
4060 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
4061 * @pdev: PCI device whose resources are to be reserved
4062 * @bars: Bitmask of BARs to be requested
4063 * @res_name: Name to be associated with resource
4064 */
4065 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
4066 const char *res_name)
4067 {
4068 return __pci_request_selected_regions(pdev, bars, res_name, 0);
4069 }
4070 EXPORT_SYMBOL(pci_request_selected_regions);
4071
4072 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
4073 const char *res_name)
4074 {
4075 return __pci_request_selected_regions(pdev, bars, res_name,
4076 IORESOURCE_EXCLUSIVE);
4077 }
4078 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4079
4080 /**
4081 * pci_release_regions - Release reserved PCI I/O and memory resources
4082 * @pdev: PCI device whose resources were previously reserved by
4083 * pci_request_regions()
4084 *
4085 * Releases all PCI I/O and memory resources previously reserved by a
4086 * successful call to pci_request_regions(). Call this function only
4087 * after all use of the PCI regions has ceased.
4088 */
4089
4090 void pci_release_regions(struct pci_dev *pdev)
4091 {
4092 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4093 }
4094 EXPORT_SYMBOL(pci_release_regions);
4095
4096 /**
4097 * pci_request_regions - Reserve PCI I/O and memory resources
4098 * @pdev: PCI device whose resources are to be reserved
4099 * @res_name: Name to be associated with resource.
4100 *
4101 * Mark all PCI regions associated with PCI device @pdev as
4102 * being reserved by owner @res_name. Do not access any
4103 * address inside the PCI regions unless this call returns
4104 * successfully.
4105 *
4106 * Returns 0 on success, or %EBUSY on error. A warning
4107 * message is also printed on failure.
4108 */
4109 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
4110 {
4111 return pci_request_selected_regions(pdev,
4112 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4113 }
4114 EXPORT_SYMBOL(pci_request_regions);
4115
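/*
 * Usage sketch (illustrative): the classic probe-time pattern built on the
 * helpers above.  DRV_NAME and the use of BAR 0 are placeholders, and many
 * drivers use the managed pcim_* variants instead:
 *
 *	err = pci_request_regions(pdev, DRV_NAME);
 *	if (err)
 *		return err;
 *
 *	regs = pci_iomap(pdev, 0, 0);
 *	if (!regs) {
 *		pci_release_regions(pdev);
 *		return -ENOMEM;
 *	}
 *	...
 *	pci_iounmap(pdev, regs);
 *	pci_release_regions(pdev);
 */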
4116 /**
4117 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4118 * @pdev: PCI device whose resources are to be reserved
4119 * @res_name: Name to be associated with resource.
4120 *
4121 * Mark all PCI regions associated with PCI device @pdev as being reserved
4122 * by owner @res_name. Do not access any address inside the PCI regions
4123 * unless this call returns successfully.
4124 *
4125 * pci_request_regions_exclusive() will mark the region so that /dev/mem
4126 * and the sysfs MMIO access will not be allowed.
4127 *
4128 * Returns 0 on success, or %EBUSY on error. A warning message is also
4129 * printed on failure.
4130 */
4131 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4132 {
4133 return pci_request_selected_regions_exclusive(pdev,
4134 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4135 }
4136 EXPORT_SYMBOL(pci_request_regions_exclusive);
4137
4138 /*
4139 * Record the PCI IO range (expressed as CPU physical address + size).
4140 * Return a negative value if an error has occurred, zero otherwise
4141 */
4142 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4143 resource_size_t size)
4144 {
4145 int ret = 0;
4146 #ifdef PCI_IOBASE
4147 struct logic_pio_hwaddr *range;
4148
4149 if (!size || addr + size < addr)
4150 return -EINVAL;
4151
4152 range = kzalloc(sizeof(*range), GFP_ATOMIC);
4153 if (!range)
4154 return -ENOMEM;
4155
4156 range->fwnode = fwnode;
4157 range->size = size;
4158 range->hw_start = addr;
4159 range->flags = LOGIC_PIO_CPU_MMIO;
4160
4161 ret = logic_pio_register_range(range);
4162 if (ret)
4163 kfree(range);
4164
4165 /* Ignore duplicates due to deferred probing */
4166 if (ret == -EEXIST)
4167 ret = 0;
4168 #endif
4169
4170 return ret;
4171 }
4172
4173 phys_addr_t pci_pio_to_address(unsigned long pio)
4174 {
4175 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
4176
4177 #ifdef PCI_IOBASE
4178 if (pio >= MMIO_UPPER_LIMIT)
4179 return address;
4180
4181 address = logic_pio_to_hwaddr(pio);
4182 #endif
4183
4184 return address;
4185 }
4186 EXPORT_SYMBOL_GPL(pci_pio_to_address);
4187
4188 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4189 {
4190 #ifdef PCI_IOBASE
4191 return logic_pio_trans_cpuaddr(address);
4192 #else
4193 if (address > IO_SPACE_LIMIT)
4194 return (unsigned long)-1;
4195
4196 return (unsigned long) address;
4197 #endif
4198 }
4199
4200 /**
4201 * pci_remap_iospace - Remap the memory mapped I/O space
4202 * @res: Resource describing the I/O space
4203 * @phys_addr: physical address of range to be mapped
4204 *
4205 * Remap the memory mapped I/O space described by the @res and the CPU
4206 * physical address @phys_addr into virtual address space. Only
4207 * architectures that have memory mapped IO functions defined (and the
4208 * PCI_IOBASE value defined) should call this function.
4209 */
4210 #ifndef pci_remap_iospace
4211 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4212 {
4213 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4214 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4215
4216 if (!(res->flags & IORESOURCE_IO))
4217 return -EINVAL;
4218
4219 if (res->end > IO_SPACE_LIMIT)
4220 return -EINVAL;
4221
4222 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4223 pgprot_device(PAGE_KERNEL));
4224 #else
4225 /*
4226 * This architecture does not have memory mapped I/O space,
4227 * so this function should never be called
4228 */
4229 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4230 return -ENODEV;
4231 #endif
4232 }
4233 EXPORT_SYMBOL(pci_remap_iospace);
4234 #endif
4235
4236 /**
4237 * pci_unmap_iospace - Unmap the memory mapped I/O space
4238 * @res: resource to be unmapped
4239 *
4240 * Unmap the CPU virtual address @res from virtual address space. Only
4241 * architectures that have memory mapped IO functions defined (and the
4242 * PCI_IOBASE value defined) should call this function.
4243 */
4244 void pci_unmap_iospace(struct resource *res)
4245 {
4246 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4247 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4248
4249 vunmap_range(vaddr, vaddr + resource_size(res));
4250 #endif
4251 }
4252 EXPORT_SYMBOL(pci_unmap_iospace);
4253
4254 static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4255 {
4256 struct resource **res = ptr;
4257
4258 pci_unmap_iospace(*res);
4259 }
4260
4261 /**
4262 * devm_pci_remap_iospace - Managed pci_remap_iospace()
4263 * @dev: Generic device to remap IO address for
4264 * @res: Resource describing the I/O space
4265 * @phys_addr: physical address of range to be mapped
4266 *
4267 * Managed pci_remap_iospace(). Map is automatically unmapped on driver
4268 * detach.
4269 */
4270 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4271 phys_addr_t phys_addr)
4272 {
4273 const struct resource **ptr;
4274 int error;
4275
4276 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4277 if (!ptr)
4278 return -ENOMEM;
4279
4280 error = pci_remap_iospace(res, phys_addr);
4281 if (error) {
4282 devres_free(ptr);
4283 } else {
4284 *ptr = res;
4285 devres_add(dev, ptr);
4286 }
4287
4288 return error;
4289 }
4290 EXPORT_SYMBOL(devm_pci_remap_iospace);
4291
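/*
 * Usage sketch (illustrative): host bridge drivers typically call this for
 * each I/O window parsed from firmware; the variable names below are
 * placeholders:
 *
 *	resource_list_for_each_entry(win, &bridge->windows) {
 *		struct resource *res = win->res;
 *
 *		if (resource_type(res) == IORESOURCE_IO)
 *			err = devm_pci_remap_iospace(dev, res,
 *					pci_pio_to_address(res->start));
 *	}
 */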
4292 /**
4293 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4294 * @dev: Generic device to remap IO address for
4295 * @offset: Resource address to map
4296 * @size: Size of map
4297 *
4298 * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
4299 * detach.
4300 */
4301 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4302 resource_size_t offset,
4303 resource_size_t size)
4304 {
4305 void __iomem **ptr, *addr;
4306
4307 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4308 if (!ptr)
4309 return NULL;
4310
4311 addr = pci_remap_cfgspace(offset, size);
4312 if (addr) {
4313 *ptr = addr;
4314 devres_add(dev, ptr);
4315 } else
4316 devres_free(ptr);
4317
4318 return addr;
4319 }
4320 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4321
4322 /**
4323 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4324 * @dev: generic device to handle the resource for
4325 * @res: configuration space resource to be handled
4326 *
4327 * Checks that a resource is a valid memory region, requests the memory
4328 * region and ioremaps with pci_remap_cfgspace() API that ensures the
4329 * proper PCI configuration space memory attributes are guaranteed.
4330 *
4331 * All operations are managed and will be undone on driver detach.
4332 *
4333 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
4334 * on failure. Usage example::
4335 *
4336 * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4337 * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4338 * if (IS_ERR(base))
4339 * return PTR_ERR(base);
4340 */
4341 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4342 struct resource *res)
4343 {
4344 resource_size_t size;
4345 const char *name;
4346 void __iomem *dest_ptr;
4347
4348 BUG_ON(!dev);
4349
4350 if (!res || resource_type(res) != IORESOURCE_MEM) {
4351 dev_err(dev, "invalid resource\n");
4352 return IOMEM_ERR_PTR(-EINVAL);
4353 }
4354
4355 size = resource_size(res);
4356
4357 if (res->name)
4358 name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
4359 res->name);
4360 else
4361 name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4362 if (!name)
4363 return IOMEM_ERR_PTR(-ENOMEM);
4364
4365 if (!devm_request_mem_region(dev, res->start, size, name)) {
4366 dev_err(dev, "can't request region for resource %pR\n", res);
4367 return IOMEM_ERR_PTR(-EBUSY);
4368 }
4369
4370 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4371 if (!dest_ptr) {
4372 dev_err(dev, "ioremap failed for resource %pR\n", res);
4373 devm_release_mem_region(dev, res->start, size);
4374 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4375 }
4376
4377 return dest_ptr;
4378 }
4379 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4380
4381 static void __pci_set_master(struct pci_dev *dev, bool enable)
4382 {
4383 u16 old_cmd, cmd;
4384
4385 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4386 if (enable)
4387 cmd = old_cmd | PCI_COMMAND_MASTER;
4388 else
4389 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4390 if (cmd != old_cmd) {
4391 pci_dbg(dev, "%s bus mastering\n",
4392 enable ? "enabling" : "disabling");
4393 pci_write_config_word(dev, PCI_COMMAND, cmd);
4394 }
4395 dev->is_busmaster = enable;
4396 }
4397
4398 /**
4399 * pcibios_setup - process "pci=" kernel boot arguments
4400 * @str: string used to pass in "pci=" kernel boot arguments
4401 *
4402 * Process kernel boot arguments. This is the default implementation.
4403 * Architecture specific implementations can override this as necessary.
4404 */
4405 char * __weak __init pcibios_setup(char *str)
4406 {
4407 return str;
4408 }
4409
4410 /**
4411 * pcibios_set_master - enable PCI bus-mastering for device dev
4412 * @dev: the PCI device to enable
4413 *
4414 * Enables PCI bus-mastering for the device. This is the default
4415 * implementation. Architecture specific implementations can override
4416 * this if necessary.
4417 */
4418 void __weak pcibios_set_master(struct pci_dev *dev)
4419 {
4420 u8 lat;
4421
4422 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4423 if (pci_is_pcie(dev))
4424 return;
4425
4426 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4427 if (lat < 16)
4428 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4429 else if (lat > pcibios_max_latency)
4430 lat = pcibios_max_latency;
4431 else
4432 return;
4433
4434 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4435 }
4436
4437 /**
4438 * pci_set_master - enables bus-mastering for device dev
4439 * @dev: the PCI device to enable
4440 *
4441 * Enables bus-mastering on the device and calls pcibios_set_master()
4442 * to do the needed arch specific settings.
4443 */
4444 void pci_set_master(struct pci_dev *dev)
4445 {
4446 __pci_set_master(dev, true);
4447 pcibios_set_master(dev);
4448 }
4449 EXPORT_SYMBOL(pci_set_master);
4450
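/*
 * Usage sketch (illustrative): drivers that perform DMA normally enable
 * the device and configure a DMA mask before turning on bus mastering;
 * the 64-bit mask below is only an example:
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 */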
4451 /**
4452 * pci_clear_master - disables bus-mastering for device dev
4453 * @dev: the PCI device to disable
4454 */
4455 void pci_clear_master(struct pci_dev *dev)
4456 {
4457 __pci_set_master(dev, false);
4458 }
4459 EXPORT_SYMBOL(pci_clear_master);
4460
4461 /**
4462 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4463 * @dev: the PCI device for which MWI is to be enabled
4464 *
4465 * Helper function for pci_set_mwi.
4466 * Originally copied from drivers/net/acenic.c.
4467 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4468 *
4469 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4470 */
4471 int pci_set_cacheline_size(struct pci_dev *dev)
4472 {
4473 u8 cacheline_size;
4474
4475 if (!pci_cache_line_size)
4476 return -EINVAL;
4477
4478 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4479 equal to or a multiple of the right value. */
4480 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4481 if (cacheline_size >= pci_cache_line_size &&
4482 (cacheline_size % pci_cache_line_size) == 0)
4483 return 0;
4484
4485 /* Write the correct value. */
4486 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4487 /* Read it back. */
4488 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4489 if (cacheline_size == pci_cache_line_size)
4490 return 0;
4491
4492 pci_dbg(dev, "cache line size of %d is not supported\n",
4493 pci_cache_line_size << 2);
4494
4495 return -EINVAL;
4496 }
4497 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4498
4499 /**
4500 * pci_set_mwi - enables memory-write-invalidate PCI transaction
4501 * @dev: the PCI device for which MWI is enabled
4502 *
4503 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4504 *
4505 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4506 */
4507 int pci_set_mwi(struct pci_dev *dev)
4508 {
4509 #ifdef PCI_DISABLE_MWI
4510 return 0;
4511 #else
4512 int rc;
4513 u16 cmd;
4514
4515 rc = pci_set_cacheline_size(dev);
4516 if (rc)
4517 return rc;
4518
4519 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4520 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4521 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4522 cmd |= PCI_COMMAND_INVALIDATE;
4523 pci_write_config_word(dev, PCI_COMMAND, cmd);
4524 }
4525 return 0;
4526 #endif
4527 }
4528 EXPORT_SYMBOL(pci_set_mwi);
4529
4530 /**
4531 * pcim_set_mwi - a device-managed pci_set_mwi()
4532 * @dev: the PCI device for which MWI is enabled
4533 *
4534 * Managed pci_set_mwi().
4535 *
4536 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4537 */
4538 int pcim_set_mwi(struct pci_dev *dev)
4539 {
4540 struct pci_devres *dr;
4541
4542 dr = find_pci_dr(dev);
4543 if (!dr)
4544 return -ENOMEM;
4545
4546 dr->mwi = 1;
4547 return pci_set_mwi(dev);
4548 }
4549 EXPORT_SYMBOL(pcim_set_mwi);
4550
4551 /**
4552 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4553 * @dev: the PCI device for which MWI is enabled
4554 *
4555 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4556 * Callers are not required to check the return value.
4557 *
4558 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4559 */
4560 int pci_try_set_mwi(struct pci_dev *dev)
4561 {
4562 #ifdef PCI_DISABLE_MWI
4563 return 0;
4564 #else
4565 return pci_set_mwi(dev);
4566 #endif
4567 }
4568 EXPORT_SYMBOL(pci_try_set_mwi);
4569
4570 /**
4571 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4572 * @dev: the PCI device to disable
4573 *
4574 * Disables PCI Memory-Write-Invalidate transaction on the device
4575 */
4576 void pci_clear_mwi(struct pci_dev *dev)
4577 {
4578 #ifndef PCI_DISABLE_MWI
4579 u16 cmd;
4580
4581 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4582 if (cmd & PCI_COMMAND_INVALIDATE) {
4583 cmd &= ~PCI_COMMAND_INVALIDATE;
4584 pci_write_config_word(dev, PCI_COMMAND, cmd);
4585 }
4586 #endif
4587 }
4588 EXPORT_SYMBOL(pci_clear_mwi);
4589
4590 /**
4591 * pci_disable_parity - disable parity checking for device
4592 * @dev: the PCI device to operate on
4593 *
4594 * Disable parity checking for device @dev
4595 */
4596 void pci_disable_parity(struct pci_dev *dev)
4597 {
4598 u16 cmd;
4599
4600 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4601 if (cmd & PCI_COMMAND_PARITY) {
4602 cmd &= ~PCI_COMMAND_PARITY;
4603 pci_write_config_word(dev, PCI_COMMAND, cmd);
4604 }
4605 }
4606
4607 /**
4608 * pci_intx - enables/disables PCI INTx for device dev
4609 * @pdev: the PCI device to operate on
4610 * @enable: boolean: whether to enable or disable PCI INTx
4611 *
4612 * Enables/disables PCI INTx for device @pdev
4613 */
4614 void pci_intx(struct pci_dev *pdev, int enable)
4615 {
4616 u16 pci_command, new;
4617
4618 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4619
4620 if (enable)
4621 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4622 else
4623 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4624
4625 if (new != pci_command) {
4626 struct pci_devres *dr;
4627
4628 pci_write_config_word(pdev, PCI_COMMAND, new);
4629
4630 dr = find_pci_dr(pdev);
4631 if (dr && !dr->restore_intx) {
4632 dr->restore_intx = 1;
4633 dr->orig_intx = !enable;
4634 }
4635 }
4636 }
4637 EXPORT_SYMBOL_GPL(pci_intx);
4638
4639 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4640 {
4641 struct pci_bus *bus = dev->bus;
4642 bool mask_updated = true;
4643 u32 cmd_status_dword;
4644 u16 origcmd, newcmd;
4645 unsigned long flags;
4646 bool irq_pending;
4647
4648 /*
4649 * We do a single dword read to retrieve both command and status.
4650 * Document assumptions that make this possible.
4651 */
4652 BUILD_BUG_ON(PCI_COMMAND % 4);
4653 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4654
4655 raw_spin_lock_irqsave(&pci_lock, flags);
4656
4657 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4658
4659 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4660
4661 /*
4662 * Check interrupt status register to see whether our device
4663 * triggered the interrupt (when masking) or the next IRQ is
4664 * already pending (when unmasking).
4665 */
4666 if (mask != irq_pending) {
4667 mask_updated = false;
4668 goto done;
4669 }
4670
4671 origcmd = cmd_status_dword;
4672 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4673 if (mask)
4674 newcmd |= PCI_COMMAND_INTX_DISABLE;
4675 if (newcmd != origcmd)
4676 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4677
4678 done:
4679 raw_spin_unlock_irqrestore(&pci_lock, flags);
4680
4681 return mask_updated;
4682 }
4683
4684 /**
4685 * pci_check_and_mask_intx - mask INTx on pending interrupt
4686 * @dev: the PCI device to operate on
4687 *
4688 * Check if the device dev has its INTx line asserted, mask it and return
4689 * true in that case. False is returned if no interrupt was pending.
4690 */
4691 bool pci_check_and_mask_intx(struct pci_dev *dev)
4692 {
4693 return pci_check_and_set_intx_mask(dev, true);
4694 }
4695 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4696
4697 /**
4698 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4699 * @dev: the PCI device to operate on
4700 *
4701 * Check if the device dev has its INTx line asserted, unmask it if not and
4702 * return true. False is returned and the mask remains active if there was
4703 * still an interrupt pending.
4704 */
4705 bool pci_check_and_unmask_intx(struct pci_dev *dev)
4706 {
4707 return pci_check_and_set_intx_mask(dev, false);
4708 }
4709 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
4710
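/*
 * Usage sketch (illustrative): a handler for a shared legacy INTx line can
 * use the two helpers above to claim only its own interrupts and keep the
 * line masked until it has been serviced.  The "my_dev"/"md" names are
 * hypothetical:
 *
 *	static irqreturn_t my_intx_handler(int irq, void *data)
 *	{
 *		struct my_dev *md = data;
 *
 *		if (!pci_check_and_mask_intx(md->pdev))
 *			return IRQ_NONE;	(interrupt was not ours)
 *
 *		schedule_work(&md->work);	(unmask again later via
 *						 pci_check_and_unmask_intx())
 *		return IRQ_HANDLED;
 *	}
 */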
4711 /**
4712 * pci_wait_for_pending_transaction - wait for pending transaction
4713 * @dev: the PCI device to operate on
4714 *
4715 * Return 0 if a transaction is pending, 1 otherwise.
4716 */
4717 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4718 {
4719 if (!pci_is_pcie(dev))
4720 return 1;
4721
4722 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4723 PCI_EXP_DEVSTA_TRPND);
4724 }
4725 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4726
4727 /**
4728 * pcie_flr - initiate a PCIe function level reset
4729 * @dev: device to reset
4730 *
4731 * Initiate a function level reset unconditionally on @dev without
4732 * checking any flags and DEVCAP
4733 */
4734 int pcie_flr(struct pci_dev *dev)
4735 {
4736 if (!pci_wait_for_pending_transaction(dev))
4737 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4738
4739 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4740
4741 if (dev->imm_ready)
4742 return 0;
4743
4744 /*
4745 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4746 * 100ms, but may silently discard requests while the FLR is in
4747 * progress. Wait 100ms before trying to access the device.
4748 */
4749 msleep(100);
4750
4751 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4752 }
4753 EXPORT_SYMBOL_GPL(pcie_flr);
4754
4755 /**
4756 * pcie_reset_flr - initiate a PCIe function level reset
4757 * @dev: device to reset
4758 * @probe: if true, return 0 if device can be reset this way
4759 *
4760 * Initiate a function level reset on @dev.
4761 */
4762 int pcie_reset_flr(struct pci_dev *dev, bool probe)
4763 {
4764 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4765 return -ENOTTY;
4766
4767 if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4768 return -ENOTTY;
4769
4770 if (probe)
4771 return 0;
4772
4773 return pcie_flr(dev);
4774 }
4775 EXPORT_SYMBOL_GPL(pcie_reset_flr);
4776
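/*
 * Usage note (illustrative): drivers rarely call pcie_flr() or
 * pcie_reset_flr() directly.  Most callers use the generic entry points,
 * which walk dev->reset_methods[] and pick a supported mechanism:
 *
 *	err = pci_reset_function(pdev);		(takes the device lock)
 *
 * or __pci_reset_function_locked() when the device lock is already held.
 */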
4777 static int pci_af_flr(struct pci_dev *dev, bool probe)
4778 {
4779 int pos;
4780 u8 cap;
4781
4782 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4783 if (!pos)
4784 return -ENOTTY;
4785
4786 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4787 return -ENOTTY;
4788
4789 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4790 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4791 return -ENOTTY;
4792
4793 if (probe)
4794 return 0;
4795
4796 /*
4797 * Wait for Transaction Pending bit to clear. A word-aligned test
4798 * is used, so we use the control offset rather than status and shift
4799 * the test bit to match.
4800 */
4801 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4802 PCI_AF_STATUS_TP << 8))
4803 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4804
4805 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4806
4807 if (dev->imm_ready)
4808 return 0;
4809
4810 /*
4811 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4812 * updated 27 July 2006; a device must complete an FLR within
4813 * 100ms, but may silently discard requests while the FLR is in
4814 * progress. Wait 100ms before trying to access the device.
4815 */
4816 msleep(100);
4817
4818 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4819 }
4820
4821 /**
4822 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4823 * @dev: Device to reset.
4824 * @probe: if true, return 0 if the device can be reset this way.
4825 *
4826 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4827 * unset, it will be reinitialized internally when going from PCI_D3hot to
4828 * PCI_D0. If that's the case and the device is not in a low-power state
4829 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4830 *
4831 * NOTE: This causes the caller to sleep for twice the device power transition
4832 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4833 * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4834 * Moreover, only devices in D0 can be reset by this function.
4835 */
4836 static int pci_pm_reset(struct pci_dev *dev, bool probe)
4837 {
4838 u16 csr;
4839
4840 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4841 return -ENOTTY;
4842
4843 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4844 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4845 return -ENOTTY;
4846
4847 if (probe)
4848 return 0;
4849
4850 if (dev->current_state != PCI_D0)
4851 return -EINVAL;
4852
4853 csr &= ~PCI_PM_CTRL_STATE_MASK;
4854 csr |= PCI_D3hot;
4855 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4856 pci_dev_d3_sleep(dev);
4857
4858 csr &= ~PCI_PM_CTRL_STATE_MASK;
4859 csr |= PCI_D0;
4860 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4861 pci_dev_d3_sleep(dev);
4862
4863 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4864 }
4865
4866 /**
4867 * pcie_wait_for_link_delay - Wait until link is active or inactive
4868 * @pdev: Bridge device
4869 * @active: waiting for active or inactive?
4870 * @delay: Delay to wait after link has become active (in ms)
4871 *
4872 * Use this to wait till link becomes active or inactive.
4873 */
4874 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4875 int delay)
4876 {
4877 int timeout = 1000;
4878 bool ret;
4879 u16 lnk_status;
4880
4881 /*
4882 * Some controllers might not implement link active reporting. In this
4883 * case, we wait for 1000 ms + any delay requested by the caller.
4884 */
4885 if (!pdev->link_active_reporting) {
4886 msleep(timeout + delay);
4887 return true;
4888 }
4889
4890 /*
4891 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
4892 * after which we should expect the link to become active if the reset was
4893 * successful. If so, software must wait a minimum of 100ms before sending
4894 * configuration requests to devices downstream of this port.
4895 *
4896 * If the link fails to activate, either the device was physically
4897 * removed or the link is permanently failed.
4898 */
4899 if (active)
4900 msleep(20);
4901 for (;;) {
4902 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4903 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4904 if (ret == active)
4905 break;
4906 if (timeout <= 0)
4907 break;
4908 msleep(10);
4909 timeout -= 10;
4910 }
4911 if (active && ret)
4912 msleep(delay);
4913
4914 return ret == active;
4915 }
4916
4917 /**
4918 * pcie_wait_for_link - Wait until link is active or inactive
4919 * @pdev: Bridge device
4920 * @active: waiting for active or inactive?
4921 *
4922 * Use this to wait till link becomes active or inactive.
4923 */
4924 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4925 {
4926 return pcie_wait_for_link_delay(pdev, active, 100);
4927 }
4928
4929 /*
4930 * Find maximum D3cold delay required by all the devices on the bus. The
4931 * spec says 100 ms, but firmware can lower it and we allow drivers to
4932 * increase it as well.
4933 *
4934 * Called with @pci_bus_sem locked for reading.
4935 */
4936 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4937 {
4938 const struct pci_dev *pdev;
4939 int min_delay = 100;
4940 int max_delay = 0;
4941
4942 list_for_each_entry(pdev, &bus->devices, bus_list) {
4943 if (pdev->d3cold_delay < min_delay)
4944 min_delay = pdev->d3cold_delay;
4945 if (pdev->d3cold_delay > max_delay)
4946 max_delay = pdev->d3cold_delay;
4947 }
4948
4949 return max(min_delay, max_delay);
4950 }
4951
4952 /**
4953 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4954 * @dev: PCI bridge
4955 * @reset_type: reset type in human-readable form
4956 * @timeout: maximum time to wait for devices on secondary bus (milliseconds)
4957 *
4958 * Handle necessary delays before access to the devices on the secondary
4959 * side of the bridge are permitted after D3cold to D0 transition
4960 * or Conventional Reset.
4961 *
4962 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4963 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4964 * 4.3.2.
4965 *
4966 * Return 0 on success or -ENOTTY if the first device on the secondary bus
4967 * failed to become accessible.
4968 */
4969 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
4970 int timeout)
4971 {
4972 struct pci_dev *child;
4973 int delay;
4974
4975 if (pci_dev_is_disconnected(dev))
4976 return 0;
4977
4978 if (!pci_is_bridge(dev))
4979 return 0;
4980
4981 down_read(&pci_bus_sem);
4982
4983 /*
4984 * We only deal with devices that are present currently on the bus.
4985 * For any hot-added devices the access delay is handled in pciehp
4986 * board_added(). In case of ACPI hotplug the firmware is expected
4987 * to configure the devices before OS is notified.
4988 */
4989 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4990 up_read(&pci_bus_sem);
4991 return 0;
4992 }
4993
4994 /* Take d3cold_delay requirements into account */
4995 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4996 if (!delay) {
4997 up_read(&pci_bus_sem);
4998 return 0;
4999 }
5000
5001 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
5002 bus_list);
5003 up_read(&pci_bus_sem);
5004
5005 /*
5006 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
5007 * accessing the device after reset (that is, 1000 ms + 100 ms).
5008 */
5009 if (!pci_is_pcie(dev)) {
5010 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
5011 msleep(1000 + delay);
5012 return 0;
5013 }
5014
5015 /*
5016 * PCIe downstream and root ports that do not support speeds greater
5017 * than 5 GT/s need to wait a minimum of 100 ms. For higher speeds
5018 * (gen3) we first need to wait for the data link layer to become
5019 * active.
5020 *
5021 * However, 100 ms is only the minimum, and the PCIe spec says software
5022 * must allow at least 1 s before it can determine that a device that
5023 * did not respond is broken. There is evidence that 100 ms is not
5024 * always enough; for example, certain Titan Ridge xHCI controllers do
5025 * not always respond to configuration requests if we only wait for
5026 * 100 ms (see
5027 * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
5028 *
5029 * Therefore we wait for 100 ms and check for the device presence
5030 * until the timeout expires.
5031 */
5032 if (!pcie_downstream_port(dev))
5033 return 0;
5034
5035 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
5036 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
5037 msleep(delay);
5038 } else {
5039 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
5040 delay);
5041 if (!pcie_wait_for_link_delay(dev, true, delay)) {
5042 /* Did not train, no need to wait any further */
5043 pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
5044 return -ENOTTY;
5045 }
5046 }
5047
5048 return pci_dev_wait(child, reset_type, timeout - delay);
5049 }
5050
5051 void pci_reset_secondary_bus(struct pci_dev *dev)
5052 {
5053 u16 ctrl;
5054
5055 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
5056 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
5057 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5058
5059 /*
5060 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
5061 * this to 2ms to ensure that we meet the minimum requirement.
5062 */
5063 msleep(2);
5064
5065 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
5066 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5067 }
5068
5069 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
5070 {
5071 pci_reset_secondary_bus(dev);
5072 }
5073
5074 /**
5075 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
5076 * @dev: Bridge device
5077 *
5078 * Use the bridge control register to assert reset on the secondary bus.
5079 * Devices on the secondary bus are left in power-on state.
5080 */
5081 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
5082 {
5083 pcibios_reset_secondary_bus(dev);
5084
5085 return pci_bridge_wait_for_secondary_bus(dev, "bus reset",
5086 PCIE_RESET_READY_POLL_MS);
5087 }
5088 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
5089
5090 static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
5091 {
5092 struct pci_dev *pdev;
5093
5094 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
5095 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5096 return -ENOTTY;
5097
5098 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
5099 if (pdev != dev)
5100 return -ENOTTY;
5101
5102 if (probe)
5103 return 0;
5104
5105 return pci_bridge_secondary_bus_reset(dev->bus->self);
5106 }
5107
5108 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
5109 {
5110 int rc = -ENOTTY;
5111
5112 if (!hotplug || !try_module_get(hotplug->owner))
5113 return rc;
5114
5115 if (hotplug->ops->reset_slot)
5116 rc = hotplug->ops->reset_slot(hotplug, probe);
5117
5118 module_put(hotplug->owner);
5119
5120 return rc;
5121 }
5122
5123 static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
5124 {
5125 if (dev->multifunction || dev->subordinate || !dev->slot ||
5126 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5127 return -ENOTTY;
5128
5129 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5130 }
5131
5132 static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5133 {
5134 int rc;
5135
5136 rc = pci_dev_reset_slot_function(dev, probe);
5137 if (rc != -ENOTTY)
5138 return rc;
5139 return pci_parent_bus_reset(dev, probe);
5140 }
5141
5142 void pci_dev_lock(struct pci_dev *dev)
5143 {
5144 /* block PM suspend, driver probe, etc. */
5145 device_lock(&dev->dev);
5146 pci_cfg_access_lock(dev);
5147 }
5148 EXPORT_SYMBOL_GPL(pci_dev_lock);
5149
5150 /* Return 1 on successful lock, 0 on contention */
5151 int pci_dev_trylock(struct pci_dev *dev)
5152 {
5153 if (device_trylock(&dev->dev)) {
5154 if (pci_cfg_access_trylock(dev))
5155 return 1;
5156 device_unlock(&dev->dev);
5157 }
5158
5159 return 0;
5160 }
5161 EXPORT_SYMBOL_GPL(pci_dev_trylock);
5162
5163 void pci_dev_unlock(struct pci_dev *dev)
5164 {
5165 pci_cfg_access_unlock(dev);
5166 device_unlock(&dev->dev);
5167 }
5168 EXPORT_SYMBOL_GPL(pci_dev_unlock);
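/*
 * Usage sketch (illustrative): a caller that must reset a device while
 * excluding concurrent driver binding and config space access brackets the
 * operation with the helpers above:
 *
 *	pci_dev_lock(pdev);
 *	rc = __pci_reset_function_locked(pdev);
 *	pci_dev_unlock(pdev);
 */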
5169
5170 static void pci_dev_save_and_disable(struct pci_dev *dev)
5171 {
5172 const struct pci_error_handlers *err_handler =
5173 dev->driver ? dev->driver->err_handler : NULL;
5174
5175 /*
5176 * dev->driver->err_handler->reset_prepare() is protected against
5177 * races with ->remove() by the device lock, which must be held by
5178 * the caller.
5179 */
5180 if (err_handler && err_handler->reset_prepare)
5181 err_handler->reset_prepare(dev);
5182
5183 /*
5184 * Wake-up device prior to save. PM registers default to D0 after
5185 * reset and a simple register restore doesn't reliably return
5186 * to a non-D0 state anyway.
5187 */
5188 pci_set_power_state(dev, PCI_D0);
5189
5190 pci_save_state(dev);
5191 /*
5192 * Disable the device by clearing the Command register, except for
5193 * INTx-disable which is set. This not only disables MMIO and I/O port
5194 * BARs, but also prevents the device from being Bus Master, preventing
5195 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
5196 * compliant devices, INTx-disable prevents legacy interrupts.
5197 */
5198 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5199 }
5200
5201 static void pci_dev_restore(struct pci_dev *dev)
5202 {
5203 const struct pci_error_handlers *err_handler =
5204 dev->driver ? dev->driver->err_handler : NULL;
5205
5206 pci_restore_state(dev);
5207
5208 /*
5209 * dev->driver->err_handler->reset_done() is protected against
5210 * races with ->remove() by the device lock, which must be held by
5211 * the caller.
5212 */
5213 if (err_handler && err_handler->reset_done)
5214 err_handler->reset_done(dev);
5215 }
5216
5217 /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5218 static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5219 { },
5220 { pci_dev_specific_reset, .name = "device_specific" },
5221 { pci_dev_acpi_reset, .name = "acpi" },
5222 { pcie_reset_flr, .name = "flr" },
5223 { pci_af_flr, .name = "af_flr" },
5224 { pci_pm_reset, .name = "pm" },
5225 { pci_reset_bus_function, .name = "bus" },
5226 };
5227
5228 static ssize_t reset_method_show(struct device *dev,
5229 struct device_attribute *attr, char *buf)
5230 {
5231 struct pci_dev *pdev = to_pci_dev(dev);
5232 ssize_t len = 0;
5233 int i, m;
5234
5235 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5236 m = pdev->reset_methods[i];
5237 if (!m)
5238 break;
5239
5240 len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
5241 pci_reset_fn_methods[m].name);
5242 }
5243
5244 if (len)
5245 len += sysfs_emit_at(buf, len, "\n");
5246
5247 return len;
5248 }
5249
5250 static int reset_method_lookup(const char *name)
5251 {
5252 int m;
5253
5254 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5255 if (sysfs_streq(name, pci_reset_fn_methods[m].name))
5256 return m;
5257 }
5258
5259 return 0; /* not found */
5260 }
5261
5262 static ssize_t reset_method_store(struct device *dev,
5263 struct device_attribute *attr,
5264 const char *buf, size_t count)
5265 {
5266 struct pci_dev *pdev = to_pci_dev(dev);
5267 char *options, *name;
5268 int m, n;
5269 u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
5270
5271 if (sysfs_streq(buf, "")) {
5272 pdev->reset_methods[0] = 0;
5273 pci_warn(pdev, "All device reset methods disabled by user");
5274 return count;
5275 }
5276
5277 if (sysfs_streq(buf, "default")) {
5278 pci_init_reset_methods(pdev);
5279 return count;
5280 }
5281
5282 options = kstrndup(buf, count, GFP_KERNEL);
5283 if (!options)
5284 return -ENOMEM;
5285
5286 n = 0;
5287 while ((name = strsep(&options, " ")) != NULL) {
5288 if (sysfs_streq(name, ""))
5289 continue;
5290
5291 name = strim(name);
5292
5293 m = reset_method_lookup(name);
5294 if (!m) {
5295 pci_err(pdev, "Invalid reset method '%s'", name);
5296 goto error;
5297 }
5298
5299 if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
5300 pci_err(pdev, "Unsupported reset method '%s'", name);
5301 goto error;
5302 }
5303
5304 if (n == PCI_NUM_RESET_METHODS - 1) {
5305 pci_err(pdev, "Too many reset methods\n");
5306 goto error;
5307 }
5308
5309 reset_methods[n++] = m;
5310 }
5311
5312 reset_methods[n] = 0;
5313
5314 /* Warn if dev-specific supported but not highest priority */
5315 if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
5316 reset_methods[0] != 1)
5317 pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
5318 memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
5319 kfree(options);
5320 return count;
5321
5322 error:
5323 /* Leave previous methods unchanged */
5324 kfree(options);
5325 return -EINVAL;
5326 }
5327 static DEVICE_ATTR_RW(reset_method);
5328
5329 static struct attribute *pci_dev_reset_method_attrs[] = {
5330 &dev_attr_reset_method.attr,
5331 NULL,
5332 };
5333
5334 static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
5335 struct attribute *a, int n)
5336 {
5337 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
5338
5339 if (!pci_reset_supported(pdev))
5340 return 0;
5341
5342 return a->mode;
5343 }
5344
5345 const struct attribute_group pci_dev_reset_method_attr_group = {
5346 .attrs = pci_dev_reset_method_attrs,
5347 .is_visible = pci_dev_reset_method_attr_is_visible,
5348 };
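
/*
 * Example (illustrative, not part of the original file): the reset_method
 * attribute defined above can be read and reordered from user space; the
 * device address below is a placeholder.
 *
 *	# cat /sys/bus/pci/devices/0000:01:00.0/reset_method
 *	flr bus
 *	# echo "bus flr" > /sys/bus/pci/devices/0000:01:00.0/reset_method
 *	# echo "default" > /sys/bus/pci/devices/0000:01:00.0/reset_method
 *
 * Writing an empty string disables all reset methods for the device.
 */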
5349
5350 /**
5351 * __pci_reset_function_locked - reset a PCI device function while holding
5352 * the @dev mutex lock.
5353 * @dev: PCI device to reset
5354 *
5355 * Some devices allow an individual function to be reset without affecting
5356 * other functions in the same device. The PCI device must be responsive
5357 * to PCI config space in order to use this function.
5358 *
5359 * The device function is presumed to be unused and the caller is holding
5360 * the device mutex lock when this function is called.
5361 *
5362 * Resetting the device will make the contents of PCI configuration space
5363 * random, so any caller of this must be prepared to reinitialise the
5364 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5365 * etc.
5366 *
5367 * Returns 0 if the device function was successfully reset or negative if the
5368 * device doesn't support resetting a single function.
5369 */
5370 int __pci_reset_function_locked(struct pci_dev *dev)
5371 {
5372 int i, m, rc;
5373
5374 might_sleep();
5375
5376 /*
5377 * A reset method returns -ENOTTY if it doesn't support this device and
5378 * we should try the next method.
5379 *
5380 * If it returns 0 (success), we're finished. If it returns any other
5381 * error, we're also finished: this indicates that further reset
5382 * mechanisms might be broken on the device.
5383 */
5384 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5385 m = dev->reset_methods[i];
5386 if (!m)
5387 return -ENOTTY;
5388
5389 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
5390 if (!rc)
5391 return 0;
5392 if (rc != -ENOTTY)
5393 return rc;
5394 }
5395
5396 return -ENOTTY;
5397 }
5398 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
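
/*
 * Illustrative sketch (not part of the original file): a caller that already
 * holds the device lock and has quiesced the device can drive the reset and
 * recovery itself; "pdev" and the surrounding error handling are assumptions.
 *
 *	pci_save_state(pdev);
 *	rc = __pci_reset_function_locked(pdev);
 *	pci_restore_state(pdev);
 *	if (rc)
 *		return rc;
 */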
5399
5400 /**
5401 * pci_init_reset_methods - check whether device can be safely reset
5402 * and store supported reset mechanisms.
5403 * @dev: PCI device to check for reset mechanisms
5404 *
5405 * Some devices allow an individual function to be reset without affecting
5406 * other functions in the same device. The PCI device must be in D0-D3hot
5407 * state.
5408 *
5409 * Stores the reset mechanisms supported by the device in the reset_methods
5410 * byte array, which is a member of struct pci_dev.
5411 */
5412 void pci_init_reset_methods(struct pci_dev *dev)
5413 {
5414 int m, i, rc;
5415
5416 BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5417
5418 might_sleep();
5419
5420 i = 0;
5421 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5422 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5423 if (!rc)
5424 dev->reset_methods[i++] = m;
5425 else if (rc != -ENOTTY)
5426 break;
5427 }
5428
5429 dev->reset_methods[i] = 0;
5430 }
5431
5432 /**
5433 * pci_reset_function - quiesce and reset a PCI device function
5434 * @dev: PCI device to reset
5435 *
5436 * Some devices allow an individual function to be reset without affecting
5437 * other functions in the same device. The PCI device must be responsive
5438 * to PCI config space in order to use this function.
5439 *
5440 * This function does not just reset the PCI portion of a device, but
5441 * clears all the state associated with the device. This function differs
5442 * from __pci_reset_function_locked() in that it saves and restores device state
5443 * over the reset and takes the PCI device lock.
5444 *
5445 * Returns 0 if the device function was successfully reset or negative if the
5446 * device doesn't support resetting a single function.
5447 */
5448 int pci_reset_function(struct pci_dev *dev)
5449 {
5450 int rc;
5451
5452 if (!pci_reset_supported(dev))
5453 return -ENOTTY;
5454
5455 pci_dev_lock(dev);
5456 pci_dev_save_and_disable(dev);
5457
5458 rc = __pci_reset_function_locked(dev);
5459
5460 pci_dev_restore(dev);
5461 pci_dev_unlock(dev);
5462
5463 return rc;
5464 }
5465 EXPORT_SYMBOL_GPL(pci_reset_function);
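
/*
 * Illustrative sketch (not part of the original file): a driver recovering a
 * hung device could use pci_reset_function(), which takes the device lock and
 * saves/restores config state around the reset; reinit_hw() is a hypothetical
 * driver helper.
 *
 *	if (pci_reset_supported(pdev)) {
 *		err = pci_reset_function(pdev);
 *		if (!err)
 *			err = reinit_hw(pdev);
 *	}
 */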
5466
5467 /**
5468 * pci_reset_function_locked - quiesce and reset a PCI device function
5469 * @dev: PCI device to reset
5470 *
5471 * Some devices allow an individual function to be reset without affecting
5472 * other functions in the same device. The PCI device must be responsive
5473 * to PCI config space in order to use this function.
5474 *
5475 * This function does not just reset the PCI portion of a device, but
5476 * clears all the state associated with the device. This function differs
5477 * from __pci_reset_function_locked() in that it saves and restores device state
5478 * over the reset. It also differs from pci_reset_function() in that it
5479 * requires the PCI device lock to be held.
5480 *
5481 * Returns 0 if the device function was successfully reset or negative if the
5482 * device doesn't support resetting a single function.
5483 */
5484 int pci_reset_function_locked(struct pci_dev *dev)
5485 {
5486 int rc;
5487
5488 if (!pci_reset_supported(dev))
5489 return -ENOTTY;
5490
5491 pci_dev_save_and_disable(dev);
5492
5493 rc = __pci_reset_function_locked(dev);
5494
5495 pci_dev_restore(dev);
5496
5497 return rc;
5498 }
5499 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5500
5501 /**
5502 * pci_try_reset_function - quiesce and reset a PCI device function
5503 * @dev: PCI device to reset
5504 *
5505 * Same as above, except return -EAGAIN if unable to lock device.
5506 */
5507 int pci_try_reset_function(struct pci_dev *dev)
5508 {
5509 int rc;
5510
5511 if (!pci_reset_supported(dev))
5512 return -ENOTTY;
5513
5514 if (!pci_dev_trylock(dev))
5515 return -EAGAIN;
5516
5517 pci_dev_save_and_disable(dev);
5518 rc = __pci_reset_function_locked(dev);
5519 pci_dev_restore(dev);
5520 pci_dev_unlock(dev);
5521
5522 return rc;
5523 }
5524 EXPORT_SYMBOL_GPL(pci_try_reset_function);
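
/*
 * Illustrative sketch (not part of the original file): a caller that must not
 * block on the device lock can use pci_try_reset_function() and defer on
 * contention.
 *
 *	ret = pci_try_reset_function(pdev);
 *	if (ret == -EAGAIN)
 *		return ret;	/* device lock held elsewhere; retry later */
 */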
5525
5526 /* Do any devices on or below this bus prevent a bus reset? */
5527 static bool pci_bus_resetable(struct pci_bus *bus)
5528 {
5529 struct pci_dev *dev;
5530
5532 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5533 return false;
5534
5535 list_for_each_entry(dev, &bus->devices, bus_list) {
5536 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5537 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5538 return false;
5539 }
5540
5541 return true;
5542 }
5543
5544 /* Lock devices from the top of the tree down */
5545 static void pci_bus_lock(struct pci_bus *bus)
5546 {
5547 struct pci_dev *dev;
5548
5549 list_for_each_entry(dev, &bus->devices, bus_list) {
5550 pci_dev_lock(dev);
5551 if (dev->subordinate)
5552 pci_bus_lock(dev->subordinate);
5553 }
5554 }
5555
5556 /* Unlock devices from the bottom of the tree up */
5557 static void pci_bus_unlock(struct pci_bus *bus)
5558 {
5559 struct pci_dev *dev;
5560
5561 list_for_each_entry(dev, &bus->devices, bus_list) {
5562 if (dev->subordinate)
5563 pci_bus_unlock(dev->subordinate);
5564 pci_dev_unlock(dev);
5565 }
5566 }
5567
5568 /* Return 1 on successful lock, 0 on contention */
5569 static int pci_bus_trylock(struct pci_bus *bus)
5570 {
5571 struct pci_dev *dev;
5572
5573 list_for_each_entry(dev, &bus->devices, bus_list) {
5574 if (!pci_dev_trylock(dev))
5575 goto unlock;
5576 if (dev->subordinate) {
5577 if (!pci_bus_trylock(dev->subordinate)) {
5578 pci_dev_unlock(dev);
5579 goto unlock;
5580 }
5581 }
5582 }
5583 return 1;
5584
5585 unlock:
5586 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5587 if (dev->subordinate)
5588 pci_bus_unlock(dev->subordinate);
5589 pci_dev_unlock(dev);
5590 }
5591 return 0;
5592 }
5593
5594 /* Do any devices on or below this slot prevent a bus reset? */
5595 static bool pci_slot_resetable(struct pci_slot *slot)
5596 {
5597 struct pci_dev *dev;
5598
5599 if (slot->bus->self &&
5600 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5601 return false;
5602
5603 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5604 if (!dev->slot || dev->slot != slot)
5605 continue;
5606 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5607 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5608 return false;
5609 }
5610
5611 return true;
5612 }
5613
5614 /* Lock devices from the top of the tree down */
5615 static void pci_slot_lock(struct pci_slot *slot)
5616 {
5617 struct pci_dev *dev;
5618
5619 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5620 if (!dev->slot || dev->slot != slot)
5621 continue;
5622 pci_dev_lock(dev);
5623 if (dev->subordinate)
5624 pci_bus_lock(dev->subordinate);
5625 }
5626 }
5627
5628 /* Unlock devices from the bottom of the tree up */
5629 static void pci_slot_unlock(struct pci_slot *slot)
5630 {
5631 struct pci_dev *dev;
5632
5633 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5634 if (!dev->slot || dev->slot != slot)
5635 continue;
5636 if (dev->subordinate)
5637 pci_bus_unlock(dev->subordinate);
5638 pci_dev_unlock(dev);
5639 }
5640 }
5641
5642 /* Return 1 on successful lock, 0 on contention */
5643 static int pci_slot_trylock(struct pci_slot *slot)
5644 {
5645 struct pci_dev *dev;
5646
5647 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5648 if (!dev->slot || dev->slot != slot)
5649 continue;
5650 if (!pci_dev_trylock(dev))
5651 goto unlock;
5652 if (dev->subordinate) {
5653 if (!pci_bus_trylock(dev->subordinate)) {
5654 pci_dev_unlock(dev);
5655 goto unlock;
5656 }
5657 }
5658 }
5659 return 1;
5660
5661 unlock:
5662 list_for_each_entry_continue_reverse(dev,
5663 &slot->bus->devices, bus_list) {
5664 if (!dev->slot || dev->slot != slot)
5665 continue;
5666 if (dev->subordinate)
5667 pci_bus_unlock(dev->subordinate);
5668 pci_dev_unlock(dev);
5669 }
5670 return 0;
5671 }
5672
5673 /*
5674 * Save and disable devices from the top of the tree down while holding
5675 * the @dev mutex lock for the entire tree.
5676 */
5677 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5678 {
5679 struct pci_dev *dev;
5680
5681 list_for_each_entry(dev, &bus->devices, bus_list) {
5682 pci_dev_save_and_disable(dev);
5683 if (dev->subordinate)
5684 pci_bus_save_and_disable_locked(dev->subordinate);
5685 }
5686 }
5687
5688 /*
5689 * Restore devices from top of the tree down while holding @dev mutex lock
5690 * for the entire tree. Parent bridges need to be restored before we can
5691 * get to subordinate devices.
5692 */
5693 static void pci_bus_restore_locked(struct pci_bus *bus)
5694 {
5695 struct pci_dev *dev;
5696
5697 list_for_each_entry(dev, &bus->devices, bus_list) {
5698 pci_dev_restore(dev);
5699 if (dev->subordinate)
5700 pci_bus_restore_locked(dev->subordinate);
5701 }
5702 }
5703
5704 /*
5705 * Save and disable devices from the top of the tree down while holding
5706 * the @dev mutex lock for the entire tree.
5707 */
5708 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5709 {
5710 struct pci_dev *dev;
5711
5712 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5713 if (!dev->slot || dev->slot != slot)
5714 continue;
5715 pci_dev_save_and_disable(dev);
5716 if (dev->subordinate)
5717 pci_bus_save_and_disable_locked(dev->subordinate);
5718 }
5719 }
5720
5721 /*
5722 * Restore devices from top of the tree down while holding @dev mutex lock
5723 * for the entire tree. Parent bridges need to be restored before we can
5724 * get to subordinate devices.
5725 */
5726 static void pci_slot_restore_locked(struct pci_slot *slot)
5727 {
5728 struct pci_dev *dev;
5729
5730 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5731 if (!dev->slot || dev->slot != slot)
5732 continue;
5733 pci_dev_restore(dev);
5734 if (dev->subordinate)
5735 pci_bus_restore_locked(dev->subordinate);
5736 }
5737 }
5738
5739 static int pci_slot_reset(struct pci_slot *slot, bool probe)
5740 {
5741 int rc;
5742
5743 if (!slot || !pci_slot_resetable(slot))
5744 return -ENOTTY;
5745
5746 if (!probe)
5747 pci_slot_lock(slot);
5748
5749 might_sleep();
5750
5751 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5752
5753 if (!probe)
5754 pci_slot_unlock(slot);
5755
5756 return rc;
5757 }
5758
5759 /**
5760 * pci_probe_reset_slot - probe whether a PCI slot can be reset
5761 * @slot: PCI slot to probe
5762 *
5763 * Return 0 if slot can be reset, negative if a slot reset is not supported.
5764 */
5765 int pci_probe_reset_slot(struct pci_slot *slot)
5766 {
5767 return pci_slot_reset(slot, PCI_RESET_PROBE);
5768 }
5769 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5770
5771 /**
5772 * __pci_reset_slot - Try to reset a PCI slot
5773 * @slot: PCI slot to reset
5774 *
5775 * A PCI bus may host multiple slots; each slot may support a reset mechanism
5776 * independent of other slots. For instance, some slots may support slot power
5777 * control. In the case of a 1:1 bus-to-slot architecture, this function may
5778 * wrap the bus reset to avoid spurious slot-related events such as hotplug.
5779 * Generally a slot reset should be attempted before a bus reset. All of the
5780 * functions of the slot and any subordinate buses behind the slot are reset
5781 * through this function. PCI config space of all devices in the slot and
5782 * behind the slot is saved before and restored after reset.
5783 *
5784 * Same as above except return -EAGAIN if the slot cannot be locked
5785 */
5786 static int __pci_reset_slot(struct pci_slot *slot)
5787 {
5788 int rc;
5789
5790 rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5791 if (rc)
5792 return rc;
5793
5794 if (pci_slot_trylock(slot)) {
5795 pci_slot_save_and_disable_locked(slot);
5796 might_sleep();
5797 rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5798 pci_slot_restore_locked(slot);
5799 pci_slot_unlock(slot);
5800 } else
5801 rc = -EAGAIN;
5802
5803 return rc;
5804 }
5805
5806 static int pci_bus_reset(struct pci_bus *bus, bool probe)
5807 {
5808 int ret;
5809
5810 if (!bus->self || !pci_bus_resetable(bus))
5811 return -ENOTTY;
5812
5813 if (probe)
5814 return 0;
5815
5816 pci_bus_lock(bus);
5817
5818 might_sleep();
5819
5820 ret = pci_bridge_secondary_bus_reset(bus->self);
5821
5822 pci_bus_unlock(bus);
5823
5824 return ret;
5825 }
5826
5827 /**
5828 * pci_bus_error_reset - reset the bridge's subordinate bus
5829 * @bridge: The parent device that connects to the bus to reset
5830 *
5831 * This function will first try to reset the slots on this bus if the method is
5832 * available. If slot reset fails or is not available, this will fall back to a
5833 * secondary bus reset.
5834 */
5835 int pci_bus_error_reset(struct pci_dev *bridge)
5836 {
5837 struct pci_bus *bus = bridge->subordinate;
5838 struct pci_slot *slot;
5839
5840 if (!bus)
5841 return -ENOTTY;
5842
5843 mutex_lock(&pci_slot_mutex);
5844 if (list_empty(&bus->slots))
5845 goto bus_reset;
5846
5847 list_for_each_entry(slot, &bus->slots, list)
5848 if (pci_probe_reset_slot(slot))
5849 goto bus_reset;
5850
5851 list_for_each_entry(slot, &bus->slots, list)
5852 if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
5853 goto bus_reset;
5854
5855 mutex_unlock(&pci_slot_mutex);
5856 return 0;
5857 bus_reset:
5858 mutex_unlock(&pci_slot_mutex);
5859 return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
5860 }
5861
5862 /**
5863 * pci_probe_reset_bus - probe whether a PCI bus can be reset
5864 * @bus: PCI bus to probe
5865 *
5866 * Return 0 if bus can be reset, negative if a bus reset is not supported.
5867 */
5868 int pci_probe_reset_bus(struct pci_bus *bus)
5869 {
5870 return pci_bus_reset(bus, PCI_RESET_PROBE);
5871 }
5872 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5873
5874 /**
5875 * __pci_reset_bus - Try to reset a PCI bus
5876 * @bus: top level PCI bus to reset
5877 *
5878 * Same as above except return -EAGAIN if the bus cannot be locked
5879 */
5880 static int __pci_reset_bus(struct pci_bus *bus)
5881 {
5882 int rc;
5883
5884 rc = pci_bus_reset(bus, PCI_RESET_PROBE);
5885 if (rc)
5886 return rc;
5887
5888 if (pci_bus_trylock(bus)) {
5889 pci_bus_save_and_disable_locked(bus);
5890 might_sleep();
5891 rc = pci_bridge_secondary_bus_reset(bus->self);
5892 pci_bus_restore_locked(bus);
5893 pci_bus_unlock(bus);
5894 } else
5895 rc = -EAGAIN;
5896
5897 return rc;
5898 }
5899
5900 /**
5901 * pci_reset_bus - Try to reset a PCI bus
5902 * @pdev: top level PCI device to reset via slot/bus
5903 *
5904 * Same as above except return -EAGAIN if the bus cannot be locked
5905 */
5906 int pci_reset_bus(struct pci_dev *pdev)
5907 {
5908 return (!pci_probe_reset_slot(pdev->slot)) ?
5909 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5910 }
5911 EXPORT_SYMBOL_GPL(pci_reset_bus);
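
/*
 * Illustrative sketch (not part of the original file): when a device offers no
 * function-level reset, a caller may fall back to resetting the slot or bus
 * it sits on.
 *
 *	if (!pci_reset_supported(pdev))
 *		err = pci_reset_bus(pdev);
 */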
5912
5913 /**
5914 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5915 * @dev: PCI device to query
5916 *
5917 * Returns mmrbc: maximum designed memory read count in bytes or
5918 * appropriate error value.
5919 */
5920 int pcix_get_max_mmrbc(struct pci_dev *dev)
5921 {
5922 int cap;
5923 u32 stat;
5924
5925 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5926 if (!cap)
5927 return -EINVAL;
5928
5929 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5930 return -EINVAL;
5931
5932 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5933 }
5934 EXPORT_SYMBOL(pcix_get_max_mmrbc);
5935
5936 /**
5937 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5938 * @dev: PCI device to query
5939 *
5940 * Returns mmrbc: maximum memory read count in bytes or appropriate error
5941 * value.
5942 */
5943 int pcix_get_mmrbc(struct pci_dev *dev)
5944 {
5945 int cap;
5946 u16 cmd;
5947
5948 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5949 if (!cap)
5950 return -EINVAL;
5951
5952 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5953 return -EINVAL;
5954
5955 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5956 }
5957 EXPORT_SYMBOL(pcix_get_mmrbc);
5958
5959 /**
5960 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5961 * @dev: PCI device to set
5962 * @mmrbc: maximum memory read count in bytes
5963 * valid values are 512, 1024, 2048, 4096
5964 *
5965 * If possible, sets the maximum memory read byte count; some bridges have
5966 * errata that prevent this.
5967 */
5968 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5969 {
5970 int cap;
5971 u32 stat, v, o;
5972 u16 cmd;
5973
5974 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5975 return -EINVAL;
5976
5977 v = ffs(mmrbc) - 10;
5978
5979 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5980 if (!cap)
5981 return -EINVAL;
5982
5983 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5984 return -EINVAL;
5985
5986 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5987 return -E2BIG;
5988
5989 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5990 return -EINVAL;
5991
5992 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5993 if (o != v) {
5994 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5995 return -EIO;
5996
5997 cmd &= ~PCI_X_CMD_MAX_READ;
5998 cmd |= v << 2;
5999 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
6000 return -EIO;
6001 }
6002 return 0;
6003 }
6004 EXPORT_SYMBOL(pcix_set_mmrbc);
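
/*
 * Illustrative sketch (not part of the original file): a PCI-X driver could
 * raise its maximum memory read byte count up to the designed maximum.
 *
 *	int max = pcix_get_max_mmrbc(pdev);
 *
 *	if (max > 0 && pcix_get_mmrbc(pdev) < max)
 *		pcix_set_mmrbc(pdev, max);
 */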
6005
6006 /**
6007 * pcie_get_readrq - get PCI Express read request size
6008 * @dev: PCI device to query
6009 *
6010 * Returns maximum memory read request in bytes or appropriate error value.
6011 */
6012 int pcie_get_readrq(struct pci_dev *dev)
6013 {
6014 u16 ctl;
6015
6016 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6017
6018 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6019 }
6020 EXPORT_SYMBOL(pcie_get_readrq);
6021
6022 /**
6023 * pcie_set_readrq - set PCI Express maximum memory read request
6024 * @dev: PCI device to set
6025 * @rq: maximum memory read count in bytes
6026 * valid values are 128, 256, 512, 1024, 2048, 4096
6027 *
6028 * If possible, sets the maximum memory read request size in bytes.
6029 */
6030 int pcie_set_readrq(struct pci_dev *dev, int rq)
6031 {
6032 u16 v;
6033 int ret;
6034 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
6035
6036 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
6037 return -EINVAL;
6038
6039 /*
6040 * If using the "performance" PCIe config, we clamp the read rq
6041 * size to the max packet size to keep the host bridge from
6042 * generating requests larger than we can cope with.
6043 */
6044 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
6045 int mps = pcie_get_mps(dev);
6046
6047 if (mps < rq)
6048 rq = mps;
6049 }
6050
6051 v = (ffs(rq) - 8) << 12;
6052
6053 if (bridge->no_inc_mrrs) {
6054 int max_mrrs = pcie_get_readrq(dev);
6055
6056 if (rq > max_mrrs) {
6057 pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
6058 return -EINVAL;
6059 }
6060 }
6061
6062 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6063 PCI_EXP_DEVCTL_READRQ, v);
6064
6065 return pcibios_err_to_errno(ret);
6066 }
6067 EXPORT_SYMBOL(pcie_set_readrq);
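
/*
 * Illustrative sketch (not part of the original file): a driver nudging its
 * Max_Read_Request_Size up to 512 bytes and tolerating failure.
 *
 *	if (pcie_get_readrq(pdev) < 512 && pcie_set_readrq(pdev, 512))
 *		pci_info(pdev, "failed to raise MRRS to 512 bytes\n");
 */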
6068
6069 /**
6070 * pcie_get_mps - get PCI Express maximum payload size
6071 * @dev: PCI device to query
6072 *
6073 * Returns maximum payload size in bytes
6074 */
6075 int pcie_get_mps(struct pci_dev *dev)
6076 {
6077 u16 ctl;
6078
6079 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6080
6081 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6082 }
6083 EXPORT_SYMBOL(pcie_get_mps);
6084
6085 /**
6086 * pcie_set_mps - set PCI Express maximum payload size
6087 * @dev: PCI device to set
6088 * @mps: maximum payload size in bytes
6089 * valid values are 128, 256, 512, 1024, 2048, 4096
6090 *
6091 * If possible, sets the maximum payload size.
6092 */
6093 int pcie_set_mps(struct pci_dev *dev, int mps)
6094 {
6095 u16 v;
6096 int ret;
6097
6098 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
6099 return -EINVAL;
6100
6101 v = ffs(mps) - 8;
6102 if (v > dev->pcie_mpss)
6103 return -EINVAL;
6104 v <<= 5;
6105
6106 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6107 PCI_EXP_DEVCTL_PAYLOAD, v);
6108
6109 return pcibios_err_to_errno(ret);
6110 }
6111 EXPORT_SYMBOL(pcie_set_mps);
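
/*
 * Illustrative sketch (not part of the original file): MPS is normally chosen
 * by the PCI core according to pcie_bus_config, but platform code with better
 * knowledge could clamp it explicitly.
 *
 *	if (pcie_get_mps(pdev) > 256)
 *		pcie_set_mps(pdev, 256);
 */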
6112
6113 /**
6114 * pcie_bandwidth_available - determine minimum link settings of a PCIe
6115 * device and its bandwidth limitation
6116 * @dev: PCI device to query
6117 * @limiting_dev: storage for device causing the bandwidth limitation
6118 * @speed: storage for speed of limiting device
6119 * @width: storage for width of limiting device
6120 *
6121 * Walk up the PCI device chain and find the point where the minimum
6122 * bandwidth is available. Return the bandwidth available there and (if
6123 * limiting_dev, speed, and width pointers are supplied) information about
6124 * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of
6125 * raw bandwidth.
6126 */
6127 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
6128 enum pci_bus_speed *speed,
6129 enum pcie_link_width *width)
6130 {
6131 u16 lnksta;
6132 enum pci_bus_speed next_speed;
6133 enum pcie_link_width next_width;
6134 u32 bw, next_bw;
6135
6136 if (speed)
6137 *speed = PCI_SPEED_UNKNOWN;
6138 if (width)
6139 *width = PCIE_LNK_WIDTH_UNKNOWN;
6140
6141 bw = 0;
6142
6143 while (dev) {
6144 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
6145
6146 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
6147 next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
6148
6149 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
6150
6151 /* Check if current device limits the total bandwidth */
6152 if (!bw || next_bw <= bw) {
6153 bw = next_bw;
6154
6155 if (limiting_dev)
6156 *limiting_dev = dev;
6157 if (speed)
6158 *speed = next_speed;
6159 if (width)
6160 *width = next_width;
6161 }
6162
6163 dev = pci_upstream_bridge(dev);
6164 }
6165
6166 return bw;
6167 }
6168 EXPORT_SYMBOL(pcie_bandwidth_available);
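
/*
 * Illustrative sketch (not part of the original file): locating the bandwidth
 * bottleneck on the path from a device to the Root Port.
 *
 *	struct pci_dev *limit = NULL;
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *	u32 bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);
 *
 * bw is raw bandwidth in Mb/s; limit, speed and width describe the narrowest
 * link found while walking up the hierarchy.
 */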
6169
6170 /**
6171 * pcie_get_speed_cap - query for the PCI device's link speed capability
6172 * @dev: PCI device to query
6173 *
6174 * Query the PCI device speed capability. Return the maximum link speed
6175 * supported by the device.
6176 */
6177 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
6178 {
6179 u32 lnkcap2, lnkcap;
6180
6181 /*
6182 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
6183 * implementation note there recommends using the Supported Link
6184 * Speeds Vector in Link Capabilities 2 when supported.
6185 *
6186 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
6187 * should use the Supported Link Speeds field in Link Capabilities,
6188 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
6189 */
6190 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
6191
6192 /* PCIe r3.0-compliant */
6193 if (lnkcap2)
6194 return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
6195
6196 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6197 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
6198 return PCIE_SPEED_5_0GT;
6199 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
6200 return PCIE_SPEED_2_5GT;
6201
6202 return PCI_SPEED_UNKNOWN;
6203 }
6204 EXPORT_SYMBOL(pcie_get_speed_cap);
6205
6206 /**
6207 * pcie_get_width_cap - query for the PCI device's link width capability
6208 * @dev: PCI device to query
6209 *
6210 * Query the PCI device width capability. Return the maximum link width
6211 * supported by the device.
6212 */
6213 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
6214 {
6215 u32 lnkcap;
6216
6217 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6218 if (lnkcap)
6219 return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
6220
6221 return PCIE_LNK_WIDTH_UNKNOWN;
6222 }
6223 EXPORT_SYMBOL(pcie_get_width_cap);
6224
6225 /**
6226 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
6227 * @dev: PCI device
6228 * @speed: storage for link speed
6229 * @width: storage for link width
6230 *
6231 * Calculate a PCI device's link bandwidth by querying for its link speed
6232 * and width, multiplying them, and applying encoding overhead. The result
6233 * is in Mb/s, i.e., megabits/second of raw bandwidth.
6234 */
6235 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
6236 enum pcie_link_width *width)
6237 {
6238 *speed = pcie_get_speed_cap(dev);
6239 *width = pcie_get_width_cap(dev);
6240
6241 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
6242 return 0;
6243
6244 return *width * PCIE_SPEED2MBS_ENC(*speed);
6245 }
6246
6247 /**
6248 * __pcie_print_link_status - Report the PCI device's link speed and width
6249 * @dev: PCI device to query
6250 * @verbose: Print info even when enough bandwidth is available
6251 *
6252 * If the available bandwidth at the device is less than the device is
6253 * capable of, report the device's maximum possible bandwidth and the
6254 * upstream link that limits its performance. If @verbose, always print
6255 * the available bandwidth, even if the device isn't constrained.
6256 */
6257 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6258 {
6259 enum pcie_link_width width, width_cap;
6260 enum pci_bus_speed speed, speed_cap;
6261 struct pci_dev *limiting_dev = NULL;
6262 u32 bw_avail, bw_cap;
6263
6264 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6265 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6266
6267 if (bw_avail >= bw_cap && verbose)
6268 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
6269 bw_cap / 1000, bw_cap % 1000,
6270 pci_speed_string(speed_cap), width_cap);
6271 else if (bw_avail < bw_cap)
6272 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
6273 bw_avail / 1000, bw_avail % 1000,
6274 pci_speed_string(speed), width,
6275 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6276 bw_cap / 1000, bw_cap % 1000,
6277 pci_speed_string(speed_cap), width_cap);
6278 }
6279
6280 /**
6281 * pcie_print_link_status - Report the PCI device's link speed and width
6282 * @dev: PCI device to query
6283 *
6284 * Report the available bandwidth at the device.
6285 */
6286 void pcie_print_link_status(struct pci_dev *dev)
6287 {
6288 __pcie_print_link_status(dev, true);
6289 }
6290 EXPORT_SYMBOL(pcie_print_link_status);
6291
6292 /**
6293 * pci_select_bars - Make BAR mask from the type of resource
6294 * @dev: the PCI device for which BAR mask is made
6295 * @flags: resource type mask to be selected
6296 *
6297 * This helper routine makes a BAR mask from the type of resource.
6298 */
6299 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6300 {
6301 int i, bars = 0;
6302 for (i = 0; i < PCI_NUM_RESOURCES; i++)
6303 if (pci_resource_flags(dev, i) & flags)
6304 bars |= (1 << i);
6305 return bars;
6306 }
6307 EXPORT_SYMBOL(pci_select_bars);
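
/*
 * Illustrative sketch (not part of the original file): a driver requesting
 * only its memory BARs using the mask built by pci_select_bars(); the driver
 * name string is a placeholder.
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	err = pci_request_selected_regions(pdev, bars, "example_drv");
 */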
6308
6309 /* Some architectures require additional programming to enable VGA */
6310 static arch_set_vga_state_t arch_set_vga_state;
6311
6312 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6313 {
6314 arch_set_vga_state = func; /* NULL disables */
6315 }
6316
6317 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6318 unsigned int command_bits, u32 flags)
6319 {
6320 if (arch_set_vga_state)
6321 return arch_set_vga_state(dev, decode, command_bits,
6322 flags);
6323 return 0;
6324 }
6325
6326 /**
6327 * pci_set_vga_state - set VGA decode state on device and parents if requested
6328 * @dev: the PCI device
6329 * @decode: true = enable decoding, false = disable decoding
6330 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6331 * @flags: traverse ancestors and change bridges
6332 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
6333 */
6334 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6335 unsigned int command_bits, u32 flags)
6336 {
6337 struct pci_bus *bus;
6338 struct pci_dev *bridge;
6339 u16 cmd;
6340 int rc;
6341
6342 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6343
6344 /* ARCH specific VGA enables */
6345 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6346 if (rc)
6347 return rc;
6348
6349 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6350 pci_read_config_word(dev, PCI_COMMAND, &cmd);
6351 if (decode)
6352 cmd |= command_bits;
6353 else
6354 cmd &= ~command_bits;
6355 pci_write_config_word(dev, PCI_COMMAND, cmd);
6356 }
6357
6358 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6359 return 0;
6360
6361 bus = dev->bus;
6362 while (bus) {
6363 bridge = bus->self;
6364 if (bridge) {
6365 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6366 &cmd);
6367 if (decode)
6368 cmd |= PCI_BRIDGE_CTL_VGA;
6369 else
6370 cmd &= ~PCI_BRIDGE_CTL_VGA;
6371 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6372 cmd);
6373 }
6374 bus = bus->parent;
6375 }
6376 return 0;
6377 }
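
/*
 * Illustrative sketch (not part of the original file): the VGA arbiter uses
 * pci_set_vga_state() to route legacy VGA ranges; disabling VGA decode on a
 * device and the bridges above it might look roughly like this.
 *
 *	pci_set_vga_state(pdev, false, PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */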
6378
6379 #ifdef CONFIG_ACPI
6380 bool pci_pr3_present(struct pci_dev *pdev)
6381 {
6382 struct acpi_device *adev;
6383
6384 if (acpi_disabled)
6385 return false;
6386
6387 adev = ACPI_COMPANION(&pdev->dev);
6388 if (!adev)
6389 return false;
6390
6391 return adev->power.flags.power_resources &&
6392 acpi_has_method(adev->handle, "_PR3");
6393 }
6394 EXPORT_SYMBOL_GPL(pci_pr3_present);
6395 #endif
6396
6397 /**
6398 * pci_add_dma_alias - Add a DMA devfn alias for a device
6399 * @dev: the PCI device for which alias is added
6400 * @devfn_from: alias slot and function
6401 * @nr_devfns: number of subsequent devfns to alias
6402 *
6403 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6404 * which is used to program permissible bus-devfn source addresses for DMA
6405 * requests in an IOMMU. These aliases factor into IOMMU group creation
6406 * and are useful for devices generating DMA requests beyond or different
6407 * from their logical bus-devfn. Examples include device quirks where the
6408 * device simply uses the wrong devfn, as well as non-transparent bridges
6409 * where the alias may be a proxy for devices in another domain.
6410 *
6411 * IOMMU group creation is performed during device discovery or addition,
6412 * prior to any potential DMA mapping and therefore prior to driver probing
6413 * (especially for userspace assigned devices where IOMMU group definition
6414 * cannot be left as a userspace activity). DMA aliases should therefore
6415 * be configured via quirks, such as the PCI fixup header quirk.
6416 */
6417 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
6418 unsigned int nr_devfns)
6419 {
6420 int devfn_to;
6421
6422 nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6423 devfn_to = devfn_from + nr_devfns - 1;
6424
6425 if (!dev->dma_alias_mask)
6426 dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6427 if (!dev->dma_alias_mask) {
6428 pci_warn(dev, "Unable to allocate DMA alias mask\n");
6429 return;
6430 }
6431
6432 bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6433
6434 if (nr_devfns == 1)
6435 pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6436 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6437 else if (nr_devfns > 1)
6438 pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6439 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6440 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6441 }
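
/*
 * Illustrative sketch (not part of the original file): a header fixup quirk
 * for a device that issues DMA as function 0 of its slot; the vendor and
 * device IDs are placeholders.
 *
 *	static void quirk_alias_fn0(struct pci_dev *dev)
 *	{
 *		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_alias_fn0);
 */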
6442
6443 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6444 {
6445 return (dev1->dma_alias_mask &&
6446 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6447 (dev2->dma_alias_mask &&
6448 test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6449 pci_real_dma_dev(dev1) == dev2 ||
6450 pci_real_dma_dev(dev2) == dev1;
6451 }
6452
6453 bool pci_device_is_present(struct pci_dev *pdev)
6454 {
6455 u32 v;
6456
6457 /* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
6458 pdev = pci_physfn(pdev);
6459 if (pci_dev_is_disconnected(pdev))
6460 return false;
6461 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6462 }
6463 EXPORT_SYMBOL_GPL(pci_device_is_present);
6464
6465 void pci_ignore_hotplug(struct pci_dev *dev)
6466 {
6467 struct pci_dev *bridge = dev->bus->self;
6468
6469 dev->ignore_hotplug = 1;
6470 /* Propagate the "ignore hotplug" setting to the parent bridge. */
6471 if (bridge)
6472 bridge->ignore_hotplug = 1;
6473 }
6474 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6475
6476 /**
6477 * pci_real_dma_dev - Get PCI DMA device for PCI device
6478 * @dev: the PCI device that may have a PCI DMA alias
6479 *
6480 * Permits the platform to provide architecture-specific functionality to
6481 * devices needing to alias DMA to another PCI device on another PCI bus. If
6482 * the PCI device is on the same bus, it is recommended to use
6483 * pci_add_dma_alias(). This is the default implementation. Architecture
6484 * implementations can override this.
6485 */
6486 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6487 {
6488 return dev;
6489 }
6490
6491 resource_size_t __weak pcibios_default_alignment(void)
6492 {
6493 return 0;
6494 }
6495
6496 /*
6497 * Arches that don't want to expose struct resource to userland as-is in
6498 * sysfs and /proc can implement their own pci_resource_to_user().
6499 */
6500 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6501 const struct resource *rsrc,
6502 resource_size_t *start, resource_size_t *end)
6503 {
6504 *start = rsrc->start;
6505 *end = rsrc->end;
6506 }
6507
6508 static char *resource_alignment_param;
6509 static DEFINE_SPINLOCK(resource_alignment_lock);
6510
6511 /**
6512 * pci_specified_resource_alignment - get resource alignment specified by user.
6513 * @dev: the PCI device to get the alignment for
6514 * @resize: whether or not to change resources' size when reassigning alignment
6515 *
6516 * RETURNS: Resource alignment if it is specified.
6517 * Zero if it is not specified.
6518 */
6519 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6520 bool *resize)
6521 {
6522 int align_order, count;
6523 resource_size_t align = pcibios_default_alignment();
6524 const char *p;
6525 int ret;
6526
6527 spin_lock(&resource_alignment_lock);
6528 p = resource_alignment_param;
6529 if (!p || !*p)
6530 goto out;
6531 if (pci_has_flag(PCI_PROBE_ONLY)) {
6532 align = 0;
6533 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6534 goto out;
6535 }
6536
6537 while (*p) {
6538 count = 0;
6539 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6540 p[count] == '@') {
6541 p += count + 1;
6542 if (align_order > 63) {
6543 pr_err("PCI: Invalid requested alignment (order %d)\n",
6544 align_order);
6545 align_order = PAGE_SHIFT;
6546 }
6547 } else {
6548 align_order = PAGE_SHIFT;
6549 }
6550
6551 ret = pci_dev_str_match(dev, p, &p);
6552 if (ret == 1) {
6553 *resize = true;
6554 align = 1ULL << align_order;
6555 break;
6556 } else if (ret < 0) {
6557 pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6558 p);
6559 break;
6560 }
6561
6562 if (*p != ';' && *p != ',') {
6563 /* End of param or invalid format */
6564 break;
6565 }
6566 p++;
6567 }
6568 out:
6569 spin_unlock(&resource_alignment_lock);
6570 return align;
6571 }
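
/*
 * Example (illustrative, not part of the original file): the parameter parsed
 * above takes the form "[<order>@]<device spec>[;...]", with the alignment
 * being 2^<order> bytes. For instance, to request 4 KiB (2^12) alignment for
 * a hypothetical 8086:9c22 device:
 *
 *	pci=resource_alignment=12@pci:8086:9c22
 *
 * Without the "<order>@" prefix, PAGE_SHIFT is used as the alignment order.
 */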
6572
6573 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6574 resource_size_t align, bool resize)
6575 {
6576 struct resource *r = &dev->resource[bar];
6577 resource_size_t size;
6578
6579 if (!(r->flags & IORESOURCE_MEM))
6580 return;
6581
6582 if (r->flags & IORESOURCE_PCI_FIXED) {
6583 pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
6584 bar, r, (unsigned long long)align);
6585 return;
6586 }
6587
6588 size = resource_size(r);
6589 if (size >= align)
6590 return;
6591
6592 /*
6593 * Increase the alignment of the resource. There are two ways we
6594 * can do this:
6595 *
6596 * 1) Increase the size of the resource. BARs are aligned on their
6597 * size, so when we reallocate space for this resource, we'll
6598 * allocate it with the larger alignment. This also prevents
6599 * assignment of any other BARs inside the alignment region, so
6600 * if we're requesting page alignment, this means no other BARs
6601 * will share the page.
6602 *
6603 * The disadvantage is that this makes the resource larger than
6604 * the hardware BAR, which may break drivers that compute things
6605 * based on the resource size, e.g., to find registers at a
6606 * fixed offset before the end of the BAR.
6607 *
6608 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6609 * set r->start to the desired alignment. By itself this
6610 * doesn't prevent other BARs being put inside the alignment
6611 * region, but if we realign *every* resource of every device in
6612 * the system, none of them will share an alignment region.
6613 *
6614 * When the user has requested alignment for only some devices via
6615 * the "pci=resource_alignment" argument, "resize" is true and we
6616 * use the first method. Otherwise we assume we're aligning all
6617 * devices and we use the second.
6618 */
6619
6620 pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
6621 bar, r, (unsigned long long)align);
6622
6623 if (resize) {
6624 r->start = 0;
6625 r->end = align - 1;
6626 } else {
6627 r->flags &= ~IORESOURCE_SIZEALIGN;
6628 r->flags |= IORESOURCE_STARTALIGN;
6629 r->start = align;
6630 r->end = r->start + size - 1;
6631 }
6632 r->flags |= IORESOURCE_UNSET;
6633 }
6634
6635 /*
6636 * This function disables memory decoding and releases memory resources
6637 * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
6638 * It also rounds up the size to the specified alignment.
6639 * Later on, the kernel will assign the page-aligned memory resource back
6640 * to the device.
6641 */
6642 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6643 {
6644 int i;
6645 struct resource *r;
6646 resource_size_t align;
6647 u16 command;
6648 bool resize = false;
6649
6650 /*
6651 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6652 * 3.4.1.11. Their resources are allocated from the space
6653 * described by the VF BARx register in the PF's SR-IOV capability.
6654 * We can't influence their alignment here.
6655 */
6656 if (dev->is_virtfn)
6657 return;
6658
6659 /* check if specified PCI is target device to reassign */
6660 align = pci_specified_resource_alignment(dev, &resize);
6661 if (!align)
6662 return;
6663
6664 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6665 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6666 pci_warn(dev, "Can't reassign resources to host bridge\n");
6667 return;
6668 }
6669
6670 pci_read_config_word(dev, PCI_COMMAND, &command);
6671 command &= ~PCI_COMMAND_MEMORY;
6672 pci_write_config_word(dev, PCI_COMMAND, command);
6673
6674 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6675 pci_request_resource_alignment(dev, i, align, resize);
6676
6677 /*
6678 * Need to disable the bridge's resource windows
6679 * so that the kernel can reassign new resource
6680 * windows later on.
6681 */
6682 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6683 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6684 r = &dev->resource[i];
6685 if (!(r->flags & IORESOURCE_MEM))
6686 continue;
6687 r->flags |= IORESOURCE_UNSET;
6688 r->end = resource_size(r) - 1;
6689 r->start = 0;
6690 }
6691 pci_disable_bridge_window(dev);
6692 }
6693 }
6694
6695 static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
6696 {
6697 size_t count = 0;
6698
6699 spin_lock(&resource_alignment_lock);
6700 if (resource_alignment_param)
6701 count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6702 spin_unlock(&resource_alignment_lock);
6703
6704 return count;
6705 }
6706
6707 static ssize_t resource_alignment_store(struct bus_type *bus,
6708 const char *buf, size_t count)
6709 {
6710 char *param, *old, *end;
6711
6712 if (count >= (PAGE_SIZE - 1))
6713 return -EINVAL;
6714
6715 param = kstrndup(buf, count, GFP_KERNEL);
6716 if (!param)
6717 return -ENOMEM;
6718
6719 end = strchr(param, '\n');
6720 if (end)
6721 *end = '\0';
6722
6723 spin_lock(&resource_alignment_lock);
6724 old = resource_alignment_param;
6725 if (strlen(param)) {
6726 resource_alignment_param = param;
6727 } else {
6728 kfree(param);
6729 resource_alignment_param = NULL;
6730 }
6731 spin_unlock(&resource_alignment_lock);
6732
6733 kfree(old);
6734
6735 return count;
6736 }
6737
6738 static BUS_ATTR_RW(resource_alignment);
6739
6740 static int __init pci_resource_alignment_sysfs_init(void)
6741 {
6742 return bus_create_file(&pci_bus_type,
6743 &bus_attr_resource_alignment);
6744 }
6745 late_initcall(pci_resource_alignment_sysfs_init);
6746
6747 static void pci_no_domains(void)
6748 {
6749 #ifdef CONFIG_PCI_DOMAINS
6750 pci_domains_supported = 0;
6751 #endif
6752 }
6753
6754 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6755 static atomic_t __domain_nr = ATOMIC_INIT(-1);
6756
6757 static int pci_get_new_domain_nr(void)
6758 {
6759 return atomic_inc_return(&__domain_nr);
6760 }
6761
6762 static int of_pci_bus_find_domain_nr(struct device *parent)
6763 {
6764 static int use_dt_domains = -1;
6765 int domain = -1;
6766
6767 if (parent)
6768 domain = of_get_pci_domain_nr(parent->of_node);
6769
6770 /*
6771 * Check DT domain and use_dt_domains values.
6772 *
6773 * If DT domain property is valid (domain >= 0) and
6774 * use_dt_domains != 0, the DT assignment is valid since this means
6775 * we have not previously allocated a domain number by using
6776 * pci_get_new_domain_nr(); we should also update use_dt_domains to
6777 * 1, to indicate that we have just assigned a domain number from
6778 * DT.
6779 *
6780 * If the DT domain property value is not valid (i.e., domain < 0), and we
6781 * have not previously assigned a domain number from DT
6782 * (use_dt_domains != 1) we should assign a domain number by
6783 * using the:
6784 *
6785 * pci_get_new_domain_nr()
6786 *
6787 * API and update the use_dt_domains value to keep track of method we
6788 * are using to assign domain numbers (use_dt_domains = 0).
6789 *
6790 * All other combinations imply we have a platform that is trying
6791 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
6792 * which is a recipe for domain mishandling and it is prevented by
6793 * invalidating the domain value (domain = -1) and printing a
6794 * corresponding error.
6795 */
6796 if (domain >= 0 && use_dt_domains) {
6797 use_dt_domains = 1;
6798 } else if (domain < 0 && use_dt_domains != 1) {
6799 use_dt_domains = 0;
6800 domain = pci_get_new_domain_nr();
6801 } else {
6802 if (parent)
6803 pr_err("Node %pOF has ", parent->of_node);
6804 pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
6805 domain = -1;
6806 }
6807
6808 return domain;
6809 }
6810
6811 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6812 {
6813 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6814 acpi_pci_bus_find_domain_nr(bus);
6815 }
6816 #endif
6817
6818 /**
6819 * pci_ext_cfg_avail - can we access extended PCI config space?
6820 *
6821 * Returns 1 if we can access PCI extended config space (offsets
6822 * greater than 0xff). This is the default implementation. Architecture
6823 * implementations can override this.
6824 */
6825 int __weak pci_ext_cfg_avail(void)
6826 {
6827 return 1;
6828 }
6829
6830 void __weak pci_fixup_cardbus(struct pci_bus *bus)
6831 {
6832 }
6833 EXPORT_SYMBOL(pci_fixup_cardbus);
6834
6835 static int __init pci_setup(char *str)
6836 {
6837 while (str) {
6838 char *k = strchr(str, ',');
6839 if (k)
6840 *k++ = 0;
6841 if (*str && (str = pcibios_setup(str)) && *str) {
6842 if (!strcmp(str, "nomsi")) {
6843 pci_no_msi();
6844 } else if (!strncmp(str, "noats", 5)) {
6845 pr_info("PCIe: ATS is disabled\n");
6846 pcie_ats_disabled = true;
6847 } else if (!strcmp(str, "noaer")) {
6848 pci_no_aer();
6849 } else if (!strcmp(str, "earlydump")) {
6850 pci_early_dump = true;
6851 } else if (!strncmp(str, "realloc=", 8)) {
6852 pci_realloc_get_opt(str + 8);
6853 } else if (!strncmp(str, "realloc", 7)) {
6854 pci_realloc_get_opt("on");
6855 } else if (!strcmp(str, "nodomains")) {
6856 pci_no_domains();
6857 } else if (!strncmp(str, "noari", 5)) {
6858 pcie_ari_disabled = true;
6859 } else if (!strncmp(str, "cbiosize=", 9)) {
6860 pci_cardbus_io_size = memparse(str + 9, &str);
6861 } else if (!strncmp(str, "cbmemsize=", 10)) {
6862 pci_cardbus_mem_size = memparse(str + 10, &str);
6863 } else if (!strncmp(str, "resource_alignment=", 19)) {
6864 resource_alignment_param = str + 19;
6865 } else if (!strncmp(str, "ecrc=", 5)) {
6866 pcie_ecrc_get_policy(str + 5);
6867 } else if (!strncmp(str, "hpiosize=", 9)) {
6868 pci_hotplug_io_size = memparse(str + 9, &str);
6869 } else if (!strncmp(str, "hpmmiosize=", 11)) {
6870 pci_hotplug_mmio_size = memparse(str + 11, &str);
6871 } else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6872 pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6873 } else if (!strncmp(str, "hpmemsize=", 10)) {
6874 pci_hotplug_mmio_size = memparse(str + 10, &str);
6875 pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6876 } else if (!strncmp(str, "hpbussize=", 10)) {
6877 pci_hotplug_bus_size =
6878 simple_strtoul(str + 10, &str, 0);
6879 if (pci_hotplug_bus_size > 0xff)
6880 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6881 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6882 pcie_bus_config = PCIE_BUS_TUNE_OFF;
6883 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
6884 pcie_bus_config = PCIE_BUS_SAFE;
6885 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
6886 pcie_bus_config = PCIE_BUS_PERFORMANCE;
6887 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6888 pcie_bus_config = PCIE_BUS_PEER2PEER;
6889 } else if (!strncmp(str, "pcie_scan_all", 13)) {
6890 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6891 } else if (!strncmp(str, "disable_acs_redir=", 18)) {
6892 disable_acs_redir_param = str + 18;
6893 } else {
6894 pr_err("PCI: Unknown option `%s'\n", str);
6895 }
6896 }
6897 str = k;
6898 }
6899 return 0;
6900 }
6901 early_param("pci", pci_setup);
6902
6903 /*
6904 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6905 * in pci_setup(), above, to point to data in the __initdata section which
6906 * will be freed after the init sequence is complete. We can't allocate memory
6907 * in pci_setup() because some architectures do not have any memory allocation
6908 * service available during an early_param() call. So we allocate memory and
6909 * copy the variable here before the init section is freed.
6911 */
6912 static int __init pci_realloc_setup_params(void)
6913 {
6914 resource_alignment_param = kstrdup(resource_alignment_param,
6915 GFP_KERNEL);
6916 disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6917
6918 return 0;
6919 }
6920 pure_initcall(pci_realloc_setup_params);
6921