// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/pci-ats.h>
#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
99 * The default CLS is used if arch didn't set CLS explicitly and not
100 * all pci devices agree on the same value. Arch can override either
101 * the dfl or actual value as it sees fit. Don't forget this is
102 * measured in 32-bit words, not bytes.
103 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/* Time to wait after a reset for device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
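
/*
 * Usage sketch (illustrative only, not part of the original file): how
 * a driver might map BAR 0 with the helper above. The function name is
 * hypothetical; real callers live in individual drivers.
 */
static __maybe_unused void __iomem *pci_example_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs = pci_ioremap_bar(pdev, 0);

	if (!regs)
		return NULL;	/* BAR 0 is unset or not a memory BAR */

	/* ... access device registers through "regs", iounmap() when done ... */
	return regs;
}
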
#endif

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *	[<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'. Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *	[<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *	pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address, which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes in kernel parameters. If the domain is
 * left unspecified, it is taken to be 0. In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device. The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}
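
/*
 * Usage sketch (illustrative only): matching a device against both
 * accepted string formats. The helper name and sample strings are
 * hypothetical.
 */
static __maybe_unused bool pci_example_str_match(struct pci_dev *pdev)
{
	const char *endp;

	/* Path format: robust against bus renumbering */
	if (pci_dev_str_match(pdev, "0000:00:1c.0/00.0", &endp) == 1)
		return true;

	/* ID format: 0 acts as a wildcard for any field */
	return pci_dev_str_match(pdev, "pci:8086:0", &endp) == 1;
}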

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it. Possible values for @cap include:
 *
 *	%PCI_CAP_ID_PM		Power Management
 *	%PCI_CAP_ID_AGP		Accelerated Graphics Port
 *	%PCI_CAP_ID_VPD		Vital Product Data
 *	%PCI_CAP_ID_SLOTID	Slot Identification
 *	%PCI_CAP_ID_MSI		Message Signalled Interrupts
 *	%PCI_CAP_ID_CHSWP	CompactPCI HotSwap
 *	%PCI_CAP_ID_PCIX	PCI-X
 *	%PCI_CAP_ID_EXP		PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
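
/*
 * Usage sketch (illustrative only): locating the Power Management
 * capability and reading its control/status register. The helper name
 * is hypothetical.
 */
static __maybe_unused u16 pci_example_read_pmcsr(struct pci_dev *pdev)
{
	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
	u16 pmcsr = 0;

	if (pm)
		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
	return pmcsr;
}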

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
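
/*
 * Usage sketch (illustrative only): the vendor-specific extended
 * capability may occur several times, so walk them all as the comment
 * above suggests. The helper name is hypothetical.
 */
static __maybe_unused void pci_example_walk_vsec(struct pci_dev *pdev)
{
	int pos = 0;

	while ((pos = pci_find_next_ext_capability(pdev, pos,
						   PCI_EXT_CAP_ID_VNDR))) {
		u32 vsec_hdr;

		/* The VSEC header dword follows the extended cap header */
		pci_read_config_dword(pdev, pos + 4, &vsec_hdr);
		pci_info(pdev, "VSEC ID %#x at config offset %#x\n",
			 vsec_hdr & 0xffff, pos);
	}
}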

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Possible values for @cap include:
 *
 *	%PCI_EXT_CAP_ID_ERR	Advanced Error Reporting
 *	%PCI_EXT_CAP_ID_VC	Virtual Channel
 *	%PCI_EXT_CAP_ID_DSN	Device Serial Number
 *	%PCI_EXT_CAP_ID_PWR	Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
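
/*
 * Usage sketch (illustrative only): walking all matching Hypertransport
 * capabilities with an explicit bound, as the note above recommends for
 * robustness against broken devices. The helper name is hypothetical.
 */
static __maybe_unused void pci_example_walk_ht(struct pci_dev *pdev)
{
	int pos = pci_find_ht_capability(pdev, HT_CAPTYPE_MSI_MAPPING);
	int guard = PCI_FIND_CAP_TTL;

	while (pos && guard--) {
		/* ... inspect the capability at "pos" ... */
		pos = pci_find_next_ht_capability(pdev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
}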

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_find_pcie_root_port - return PCIe Root Port
 * @dev: PCI device to query
 *
 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 * for a given PCI Device.
 */
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
	struct pci_dev *bridge, *highest_pcie_bridge = dev;

	bridge = pci_upstream_bridge(dev);
	while (bridge && pci_is_pcie(bridge)) {
		highest_pcie_bridge = bridge;
		bridge = pci_upstream_bridge(bridge);
	}

	if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
		return NULL;

	return highest_pcie_bridge;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);
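
/*
 * Usage sketch (illustrative only): checking whether a device sits
 * below a PCIe Root Port before touching Root-Port-only registers.
 * The helper name is hypothetical.
 */
static __maybe_unused bool pci_example_below_root_port(struct pci_dev *pdev)
{
	return pci_find_pcie_root_port(pdev) != NULL;
}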

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for the Transaction Pending bit(s) to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}
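
/*
 * Usage sketch (illustrative only): waiting for the Advanced Features
 * Transaction Pending bit before a function-level reset; @pos would be
 * the AF capability offset found by the caller. The helper name is
 * hypothetical.
 */
static __maybe_unused bool pci_example_wait_af_idle(struct pci_dev *pdev,
						    int pos)
{
	return pci_wait_for_pending(pdev, pos + PCI_AF_STATUS,
				    PCI_AF_STATUS_TP);
}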

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
	    !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->refresh_state)
		pci_platform_pm->refresh_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate the current state: we can enter D0 from any state,
	 * but we can only go deeper into sleep if we're already in a
	 * low-power state.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from state %d to %d)\n",
			dev->current_state, state);
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through - force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see PCI PM 1.1
	 * 5.6.1 table 18
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n",
				     dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot. Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold. The platform firmware is therefore queried first
 * to detect accessibility of the register. In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_refresh_power_state(dev);

	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset. Do not delay for
		 * devices powered on/off by the corresponding bridge,
		 * because we have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			if (dev->d3cold_delay && !dev->imm_ready)
				msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into
			 * D0uninitialized state, resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Powering off a bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put the device in D3cold, we put the device into D3hot in the
	 * native way, then put it into D3cold with platform ops.
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}
EXPORT_SYMBOL(pci_set_power_state);
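
/*
 * Usage sketch (illustrative only): a resume path that returns the
 * device to D0 and restores its saved config space. The helper name is
 * hypothetical.
 */
static __maybe_unused int pci_example_resume(struct pci_dev *pdev)
{
	int err = pci_set_power_state(pdev, PCI_D0);

	if (err)
		return err;
	pci_restore_state(pdev);
	return 0;
}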

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	__pci_start_power_transition(dev, PCI_D0);
	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	   that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		pci_info(dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
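
/*
 * Usage sketch (illustrative only): a legacy .suspend hook combining
 * pci_save_state() with pci_choose_state() and pci_set_power_state().
 * The helper name is hypothetical.
 */
static __maybe_unused int pci_example_legacy_suspend(struct pci_dev *pdev,
						     pm_message_t mesg)
{
	int err = pci_save_state(pdev);

	if (err)
		return err;
	return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
}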

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u16 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry, bool force)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (!force && val == saved_val)
		return;

	for (;;) {
		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry,
					   bool force)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry, force);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0, false);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10, false);
		pci_restore_config_space_range(pdev, 0, 3, 0, false);
	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		pci_restore_config_space_range(pdev, 12, 15, 0, false);

		/*
		 * Force rewriting of prefetch registers to avoid S3 resume
		 * issues on Intel PCI bridges that occur when these
		 * registers are not explicitly written.
		 */
		pci_restore_config_space_range(pdev, 9, 11, 0, true);
		pci_restore_config_space_range(pdev, 0, 8, 0, false);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0, false);
	}
}

static void pci_restore_rebar_state(struct pci_dev *pdev)
{
	unsigned int pos, nbars, i;
	u32 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
		    PCI_REBAR_CTRL_NBAR_SHIFT;

	for (i = 0; i < nbars; i++, pos += 8) {
		struct resource *res;
		int bar_idx, size;

		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
		res = pdev->resource + bar_idx;
		size = ilog2(resource_size(res)) - 20;
		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/*
	 * Restore max latencies (in the LTR capability) before enabling
	 * LTR itself (in the PCIe capability).
	 */
	pci_restore_ltr_state(dev);

	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);
	pci_restore_rebar_state(dev);
	pci_restore_dpc_state(dev);

	pci_cleanup_aer_error_status_regs(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
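
/*
 * Usage sketch (illustrative only): snapshotting a device's saved
 * state and reloading it later, in the style of callers such as VFIO.
 * The helper name is hypothetical.
 */
static __maybe_unused int pci_example_snapshot_state(struct pci_dev *pdev)
{
	struct pci_saved_state *state;
	int err;

	err = pci_save_state(pdev);
	if (err)
		return err;

	state = pci_store_saved_state(pdev);
	if (!state)
		return -ENOMEM;

	/* ... later: reload the snapshot, free it, and apply it ... */
	err = pci_load_and_free_saved_state(pdev, &state);
	if (!err)
		pci_restore_state(pdev);
	return err;
}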

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * NOTE: This function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code; write a proper resume handler and use it instead.
1633 */
pci_reenable_device(struct pci_dev * dev)1634 int pci_reenable_device(struct pci_dev *dev)
1635 {
1636 if (pci_is_enabled(dev))
1637 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1638 return 0;
1639 }
1640 EXPORT_SYMBOL(pci_reenable_device);
1641
pci_enable_bridge(struct pci_dev * dev)1642 static void pci_enable_bridge(struct pci_dev *dev)
1643 {
1644 struct pci_dev *bridge;
1645 int retval;
1646
1647 bridge = pci_upstream_bridge(dev);
1648 if (bridge)
1649 pci_enable_bridge(bridge);
1650
1651 if (pci_is_enabled(dev)) {
1652 if (!dev->is_busmaster)
1653 pci_set_master(dev);
1654 return;
1655 }
1656
1657 retval = pci_enable_device(dev);
1658 if (retval)
1659 pci_err(dev, "Error enabling bridge (%d), continuing\n",
1660 retval);
1661 pci_set_master(dev);
1662 }
1663
pci_enable_device_flags(struct pci_dev * dev,unsigned long flags)1664 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1665 {
1666 struct pci_dev *bridge;
1667 int err;
1668 int i, bars = 0;
1669
1670 /*
1671 * Power state could be unknown at this point, either due to a fresh
1672 * boot or a device removal call. So get the current power state
1673 * so that things like MSI message writing will behave as expected
1674 * (e.g. if the device really is in D0 at enable time).
1675 */
1676 if (dev->pm_cap) {
1677 u16 pmcsr;
1678 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1679 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1680 }
1681
1682 if (atomic_inc_return(&dev->enable_cnt) > 1)
1683 return 0; /* already enabled */
1684
1685 bridge = pci_upstream_bridge(dev);
1686 if (bridge)
1687 pci_enable_bridge(bridge);
1688
1689 /* only skip sriov related */
1690 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1691 if (dev->resource[i].flags & flags)
1692 bars |= (1 << i);
1693 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1694 if (dev->resource[i].flags & flags)
1695 bars |= (1 << i);
1696
1697 err = do_pci_enable_device(dev, bars);
1698 if (err < 0)
1699 atomic_dec(&dev->enable_cnt);
1700 return err;
1701 }
1702
1703 /**
1704 * pci_enable_device_io - Initialize a device for use with IO space
1705 * @dev: PCI device to be initialized
1706 *
1707 * Initialize device before it's used by a driver. Ask low-level code
1708 * to enable I/O resources. Wake up the device if it was suspended.
1709 * Beware, this function can fail.
1710 */
pci_enable_device_io(struct pci_dev * dev)1711 int pci_enable_device_io(struct pci_dev *dev)
1712 {
1713 return pci_enable_device_flags(dev, IORESOURCE_IO);
1714 }
1715 EXPORT_SYMBOL(pci_enable_device_io);
1716
1717 /**
1718 * pci_enable_device_mem - Initialize a device for use with Memory space
1719 * @dev: PCI device to be initialized
1720 *
1721 * Initialize device before it's used by a driver. Ask low-level code
1722 * to enable Memory resources. Wake up the device if it was suspended.
1723 * Beware, this function can fail.
1724 */
pci_enable_device_mem(struct pci_dev * dev)1725 int pci_enable_device_mem(struct pci_dev *dev)
1726 {
1727 return pci_enable_device_flags(dev, IORESOURCE_MEM);
1728 }
1729 EXPORT_SYMBOL(pci_enable_device_mem);
1730
1731 /**
1732 * pci_enable_device - Initialize device before it's used by a driver.
1733 * @dev: PCI device to be initialized
1734 *
1735 * Initialize device before it's used by a driver. Ask low-level code
1736 * to enable I/O and memory. Wake up the device if it was suspended.
1737 * Beware, this function can fail.
1738 *
1739 * Note we don't actually enable the device many times if we call
1740 * this function repeatedly (we just increment the count).
1741 */
pci_enable_device(struct pci_dev * dev)1742 int pci_enable_device(struct pci_dev *dev)
1743 {
1744 return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1745 }
1746 EXPORT_SYMBOL(pci_enable_device);
1747
1748 /*
1749 * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
1750 * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
1751 * there's no need to track it separately. pci_devres is initialized
1752 * when a device is enabled using managed PCI device enable interface.
1753 */
1754 struct pci_devres {
1755 unsigned int enabled:1;
1756 unsigned int pinned:1;
1757 unsigned int orig_intx:1;
1758 unsigned int restore_intx:1;
1759 unsigned int mwi:1;
1760 u32 region_mask;
1761 };
1762
1763 static void pcim_release(struct device *gendev, void *res)
1764 {
1765 struct pci_dev *dev = to_pci_dev(gendev);
1766 struct pci_devres *this = res;
1767 int i;
1768
1769 if (dev->msi_enabled)
1770 pci_disable_msi(dev);
1771 if (dev->msix_enabled)
1772 pci_disable_msix(dev);
1773
1774 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1775 if (this->region_mask & (1 << i))
1776 pci_release_region(dev, i);
1777
1778 if (this->mwi)
1779 pci_clear_mwi(dev);
1780
1781 if (this->restore_intx)
1782 pci_intx(dev, this->orig_intx);
1783
1784 if (this->enabled && !this->pinned)
1785 pci_disable_device(dev);
1786 }
1787
1788 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1789 {
1790 struct pci_devres *dr, *new_dr;
1791
1792 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1793 if (dr)
1794 return dr;
1795
1796 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1797 if (!new_dr)
1798 return NULL;
1799 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1800 }
1801
1802 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
1803 {
1804 if (pci_is_managed(pdev))
1805 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1806 return NULL;
1807 }
1808
1809 /**
1810 * pcim_enable_device - Managed pci_enable_device()
1811 * @pdev: PCI device to be initialized
1812 *
1813 * Managed pci_enable_device(). The device is disabled automatically on
1814 * driver detach unless it has been pinned with pcim_pin_device().
1814 */
1815 int pcim_enable_device(struct pci_dev *pdev)
1816 {
1817 struct pci_devres *dr;
1818 int rc;
1819
1820 dr = get_pci_dr(pdev);
1821 if (unlikely(!dr))
1822 return -ENOMEM;
1823 if (dr->enabled)
1824 return 0;
1825
1826 rc = pci_enable_device(pdev);
1827 if (!rc) {
1828 pdev->is_managed = 1;
1829 dr->enabled = 1;
1830 }
1831 return rc;
1832 }
1833 EXPORT_SYMBOL(pcim_enable_device);
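/*
 * A minimal sketch of the managed variant (hypothetical foo_probe): with
 * pcim_enable_device() the device is disabled automatically on driver
 * detach, so no explicit pci_disable_device() is needed in remove().
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */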
1834
1835 /**
1836 * pcim_pin_device - Pin managed PCI device
1837 * @pdev: PCI device to pin
1838 *
1839 * Pin managed PCI device @pdev. Pinned device won't be disabled on
1840 * driver detach. @pdev must have been enabled with
1841 * pcim_enable_device().
1842 */
1843 void pcim_pin_device(struct pci_dev *pdev)
1844 {
1845 struct pci_devres *dr;
1846
1847 dr = find_pci_dr(pdev);
1848 WARN_ON(!dr || !dr->enabled);
1849 if (dr)
1850 dr->pinned = 1;
1851 }
1852 EXPORT_SYMBOL(pcim_pin_device);
1853
1854 /**
1855 * pcibios_add_device - provide arch specific hooks when adding device dev
1856 * @dev: the PCI device being added
1857 *
1858 * Permits the platform to provide architecture specific functionality when
1859 * devices are added. This is the default implementation. Architecture
1860 * implementations can override this.
1861 */
1862 int __weak pcibios_add_device(struct pci_dev *dev)
1863 {
1864 return 0;
1865 }
1866
1867 /**
1868 * pcibios_release_device - provide arch specific hooks when releasing
1869 * device dev
1870 * @dev: the PCI device being released
1871 *
1872 * Permits the platform to provide architecture specific functionality when
1873 * devices are released. This is the default implementation. Architecture
1874 * implementations can override this.
1875 */
1876 void __weak pcibios_release_device(struct pci_dev *dev) {}
1877
1878 /**
1879 * pcibios_disable_device - disable arch specific PCI resources for device dev
1880 * @dev: the PCI device to disable
1881 *
1882 * Disables architecture specific PCI resources for the device. This
1883 * is the default implementation. Architecture implementations can
1884 * override this.
1885 */
1886 void __weak pcibios_disable_device(struct pci_dev *dev) {}
1887
1888 /**
1889 * pcibios_penalize_isa_irq - penalize an ISA IRQ
1890 * @irq: ISA IRQ to penalize
1891 * @active: IRQ active or not
1892 *
1893 * Permits the platform to provide architecture-specific functionality when
1894 * penalizing ISA IRQs. This is the default implementation. Architecture
1895 * implementations can override this.
1896 */
1897 void __weak pcibios_penalize_isa_irq(int irq, int active) {}
1898
1899 static void do_pci_disable_device(struct pci_dev *dev)
1900 {
1901 u16 pci_command;
1902
1903 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1904 if (pci_command & PCI_COMMAND_MASTER) {
1905 pci_command &= ~PCI_COMMAND_MASTER;
1906 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1907 }
1908
1909 pcibios_disable_device(dev);
1910 }
1911
1912 /**
1913 * pci_disable_enabled_device - Disable device without updating enable_cnt
1914 * @dev: PCI device to disable
1915 *
1916 * NOTE: This function is a backend of PCI power management routines and is
1917 * not supposed to be called by drivers.
1918 */
1919 void pci_disable_enabled_device(struct pci_dev *dev)
1920 {
1921 if (pci_is_enabled(dev))
1922 do_pci_disable_device(dev);
1923 }
1924
1925 /**
1926 * pci_disable_device - Disable PCI device after use
1927 * @dev: PCI device to be disabled
1928 *
1929 * Signal to the system that the PCI device is not in use by the system
1930 * anymore. This only involves disabling PCI bus-mastering, if active.
1931 *
1932 * Note we don't actually disable the device until all callers of
1933 * pci_enable_device() have called pci_disable_device().
1934 */
1935 void pci_disable_device(struct pci_dev *dev)
1936 {
1937 struct pci_devres *dr;
1938
1939 dr = find_pci_dr(dev);
1940 if (dr)
1941 dr->enabled = 0;
1942
1943 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
1944 "disabling already-disabled device");
1945
1946 if (atomic_dec_return(&dev->enable_cnt) != 0)
1947 return;
1948
1949 do_pci_disable_device(dev);
1950
1951 dev->is_busmaster = 0;
1952 }
1953 EXPORT_SYMBOL(pci_disable_device);
1954
1955 /**
1956 * pcibios_set_pcie_reset_state - set reset state for device dev
1957 * @dev: the PCIe device to reset
1958 * @state: Reset state to enter into
1959 *
1960 * Set the PCIe reset state for the device. This is the default
1961 * implementation. Architecture implementations can override this.
1962 */
1963 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
1964 enum pcie_reset_state state)
1965 {
1966 return -EINVAL;
1967 }
1968
1969 /**
1970 * pci_set_pcie_reset_state - set reset state for device dev
1971 * @dev: the PCIe device to reset
1972 * @state: Reset state to enter into
1973 *
1974 * Sets the PCI reset state for the device.
1975 */
1976 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1977 {
1978 return pcibios_set_pcie_reset_state(dev, state);
1979 }
1980 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
1981
1982 /**
1983 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
1984 * @dev: PCIe root port or event collector.
1985 */
1986 void pcie_clear_root_pme_status(struct pci_dev *dev)
1987 {
1988 pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
1989 }
1990
1991 /**
1992 * pci_check_pme_status - Check if given device has generated PME.
1993 * @dev: Device to check.
1994 *
1995 * Check the PME status of the device and if set, clear it and clear PME enable
1996 * (if set). Return 'true' if PME status and PME enable were both set or
1997 * 'false' otherwise.
1998 */
1999 bool pci_check_pme_status(struct pci_dev *dev)
2000 {
2001 int pmcsr_pos;
2002 u16 pmcsr;
2003 bool ret = false;
2004
2005 if (!dev->pm_cap)
2006 return false;
2007
2008 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2009 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2010 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2011 return false;
2012
2013 /* Clear PME status. */
2014 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2015 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2016 /* Disable PME to avoid interrupt flood. */
2017 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2018 ret = true;
2019 }
2020
2021 pci_write_config_word(dev, pmcsr_pos, pmcsr);
2022
2023 return ret;
2024 }
2025
2026 /**
2027 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2028 * @dev: Device to handle.
2029 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2030 *
2031 * Check if @dev has generated PME and queue a resume request for it in that
2032 * case.
2033 */
2034 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2035 {
2036 if (pme_poll_reset && dev->pme_poll)
2037 dev->pme_poll = false;
2038
2039 if (pci_check_pme_status(dev)) {
2040 pci_wakeup_event(dev);
2041 pm_request_resume(&dev->dev);
2042 }
2043 return 0;
2044 }
2045
2046 /**
2047 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2048 * @bus: Top bus of the subtree to walk.
2049 */
2050 void pci_pme_wakeup_bus(struct pci_bus *bus)
2051 {
2052 if (bus)
2053 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2054 }
2055
2056
2057 /**
2058 * pci_pme_capable - check the capability of PCI device to generate PME#
2059 * @dev: PCI device to handle.
2060 * @state: PCI state from which device will issue PME#.
2061 */
2062 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2063 {
2064 if (!dev->pm_cap)
2065 return false;
2066
2067 return !!(dev->pme_support & (1 << state));
2068 }
2069 EXPORT_SYMBOL(pci_pme_capable);
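/*
 * Usage sketch (hypothetical call site): callers are expected to verify
 * PME# capability for the target state before arming it, e.g.:
 *
 *	if (pci_pme_capable(pdev, PCI_D3hot))
 *		pci_pme_active(pdev, true);
 */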
2070
2071 static void pci_pme_list_scan(struct work_struct *work)
2072 {
2073 struct pci_pme_device *pme_dev, *n;
2074
2075 mutex_lock(&pci_pme_list_mutex);
2076 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2077 if (pme_dev->dev->pme_poll) {
2078 struct pci_dev *bridge;
2079
2080 bridge = pme_dev->dev->bus->self;
2081 /*
2082 * If the bridge is in a low power state, the
2083 * configuration space of subordinate devices
2084 * may not be accessible.
2085 */
2086 if (bridge && bridge->current_state != PCI_D0)
2087 continue;
2088 /*
2089 * If the device is in D3cold it should not be
2090 * polled either.
2091 */
2092 if (pme_dev->dev->current_state == PCI_D3cold)
2093 continue;
2094
2095 pci_pme_wakeup(pme_dev->dev, NULL);
2096 } else {
2097 list_del(&pme_dev->list);
2098 kfree(pme_dev);
2099 }
2100 }
2101 if (!list_empty(&pci_pme_list))
2102 queue_delayed_work(system_freezable_wq, &pci_pme_work,
2103 msecs_to_jiffies(PME_TIMEOUT));
2104 mutex_unlock(&pci_pme_list_mutex);
2105 }
2106
2107 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2108 {
2109 u16 pmcsr;
2110
2111 if (!dev->pme_support)
2112 return;
2113
2114 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2115 /* Clear PME_Status by writing 1 to it and enable PME# */
2116 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2117 if (!enable)
2118 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2119
2120 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2121 }
2122
2123 /**
2124 * pci_pme_restore - Restore PME configuration after config space restore.
2125 * @dev: PCI device to update.
2126 */
2127 void pci_pme_restore(struct pci_dev *dev)
2128 {
2129 u16 pmcsr;
2130
2131 if (!dev->pme_support)
2132 return;
2133
2134 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2135 if (dev->wakeup_prepared) {
2136 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2137 pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2138 } else {
2139 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2140 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2141 }
2142 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2143 }
2144
2145 /**
2146 * pci_pme_active - enable or disable PCI device's PME# function
2147 * @dev: PCI device to handle.
2148 * @enable: 'true' to enable PME# generation; 'false' to disable it.
2149 *
2150 * The caller must verify that the device is capable of generating PME# before
2151 * calling this function with @enable equal to 'true'.
2152 */
2153 void pci_pme_active(struct pci_dev *dev, bool enable)
2154 {
2155 __pci_pme_active(dev, enable);
2156
2157 /*
2158 * PCI (as opposed to PCIe) PME requires that the device have
2159 * its PME# line hooked up correctly. Not all hardware vendors
2160 * do this, so the PME never gets delivered and the device
2161 * remains asleep. The easiest way around this is to
2162 * periodically walk the list of suspended devices and check
2163 * whether any have their PME flag set. The assumption is that
2164 * we'll wake up often enough anyway that this won't be a huge
2165 * hit, and the power savings from the devices will still be a
2166 * win.
2167 *
2168 * Although PCIe uses an in-band PME message instead of the PME#
2169 * line to report PME, PME does not work for some PCIe devices in
2170 * reality. For example, there are devices that set their PME
2171 * status bits, but don't really bother to send a PME message;
2172 * there are PCI Express Root Ports that don't bother to
2173 * trigger interrupts when they receive PME messages from the
2174 * devices below. So PME polling is used for PCIe devices too.
2175 */
2176
2177 if (dev->pme_poll) {
2178 struct pci_pme_device *pme_dev;
2179 if (enable) {
2180 pme_dev = kmalloc(sizeof(struct pci_pme_device),
2181 GFP_KERNEL);
2182 if (!pme_dev) {
2183 pci_warn(dev, "can't enable PME#\n");
2184 return;
2185 }
2186 pme_dev->dev = dev;
2187 mutex_lock(&pci_pme_list_mutex);
2188 list_add(&pme_dev->list, &pci_pme_list);
2189 if (list_is_singular(&pci_pme_list))
2190 queue_delayed_work(system_freezable_wq,
2191 &pci_pme_work,
2192 msecs_to_jiffies(PME_TIMEOUT));
2193 mutex_unlock(&pci_pme_list_mutex);
2194 } else {
2195 mutex_lock(&pci_pme_list_mutex);
2196 list_for_each_entry(pme_dev, &pci_pme_list, list) {
2197 if (pme_dev->dev == dev) {
2198 list_del(&pme_dev->list);
2199 kfree(pme_dev);
2200 break;
2201 }
2202 }
2203 mutex_unlock(&pci_pme_list_mutex);
2204 }
2205 }
2206
2207 pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2208 }
2209 EXPORT_SYMBOL(pci_pme_active);
2210
2211 /**
2212 * __pci_enable_wake - enable PCI device as wakeup event source
2213 * @dev: PCI device affected
2214 * @state: PCI state from which device will issue wakeup events
2215 * @enable: True to enable event generation; false to disable
2216 *
2217 * This enables the device as a wakeup event source, or disables it.
2218 * When such events involve platform-specific hooks, those hooks are
2219 * called automatically by this routine.
2220 *
2221 * Devices with legacy power management (no standard PCI PM capabilities)
2222 * always require such platform hooks.
2223 *
2224 * RETURN VALUE:
2225 * 0 is returned on success
2226 * -EINVAL is returned if the device is not supposed to wake up the system
2227 * A platform-dependent error code is returned if both the platform and
2228 * the native mechanism fail to enable the generation of wake-up events
2229 */
2230 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2231 {
2232 int ret = 0;
2233
2234 /*
2235 * Bridges that are not power-manageable directly only signal
2236 * wakeup on behalf of subordinate devices, which is set up
2237 * elsewhere, so skip them. However, bridges that are
2238 * power-manageable may signal wakeup for themselves (for example,
2239 * on a hotplug event) and they need to be covered here.
2240 */
2241 if (!pci_power_manageable(dev))
2242 return 0;
2243
2244 /* Don't do the same thing twice in a row for one device. */
2245 if (!!enable == !!dev->wakeup_prepared)
2246 return 0;
2247
2248 /*
2249 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2250 * Anderson we should be doing PME# wake enable followed by ACPI wake
2251 * enable. To disable wake-up we call the platform first, for symmetry.
2252 */
2253
2254 if (enable) {
2255 int error;
2256
2257 if (pci_pme_capable(dev, state))
2258 pci_pme_active(dev, true);
2259 else
2260 ret = 1;
2261 error = platform_pci_set_wakeup(dev, true);
2262 if (ret)
2263 ret = error;
2264 if (!ret)
2265 dev->wakeup_prepared = true;
2266 } else {
2267 platform_pci_set_wakeup(dev, false);
2268 pci_pme_active(dev, false);
2269 dev->wakeup_prepared = false;
2270 }
2271
2272 return ret;
2273 }
2274
2275 /**
2276 * pci_enable_wake - change wakeup settings for a PCI device
2277 * @pci_dev: Target device
2278 * @state: PCI state from which device will issue wakeup events
2279 * @enable: Whether or not to enable event generation
2280 *
2281 * If @enable is set, check device_may_wakeup() for the device before calling
2282 * __pci_enable_wake() for it.
2283 */
2284 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2285 {
2286 if (enable && !device_may_wakeup(&pci_dev->dev))
2287 return -EINVAL;
2288
2289 return __pci_enable_wake(pci_dev, state, enable);
2290 }
2291 EXPORT_SYMBOL(pci_enable_wake);
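/*
 * Usage sketch (hypothetical legacy suspend hook; foo_suspend is an
 * assumption): arm wake-up before putting the device into D3hot.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_enable_wake(pdev, PCI_D3hot, device_may_wakeup(&pdev->dev));
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 */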
2292
2293 /**
2294 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2295 * @dev: PCI device to prepare
2296 * @enable: True to enable wake-up event generation; false to disable
2297 *
2298 * Many drivers want the device to wake up the system from D3_hot or D3_cold
2299 * and this function allows them to set that up cleanly - pci_enable_wake()
2300 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2301 * ordering constraints.
2302 *
2303 * This function only returns an error code if the device is not allowed to wake
2304 * up the system from sleep or it is not capable of generating PME# from both
2305 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2306 */
2307 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2308 {
2309 return pci_pme_capable(dev, PCI_D3cold) ?
2310 pci_enable_wake(dev, PCI_D3cold, enable) :
2311 pci_enable_wake(dev, PCI_D3hot, enable);
2312 }
2313 EXPORT_SYMBOL(pci_wake_from_d3);
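/*
 * Usage sketch (hypothetical call site): a driver that just wants the
 * deepest reachable D3 wake-up passes the enable flag and lets the
 * helper pick D3cold or D3hot:
 *
 *	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
 */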
2314
2315 /**
2316 * pci_target_state - find an appropriate low power state for a given PCI dev
2317 * @dev: PCI device
2318 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2319 *
2320 * Use underlying platform code to find a supported low power state for @dev.
2321 * If the platform can't manage @dev, return the deepest state from which it
2322 * can generate wake events, based on any available PME info.
2323 */
2324 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2325 {
2326 pci_power_t target_state = PCI_D3hot;
2327
2328 if (platform_pci_power_manageable(dev)) {
2329 /*
2330 * Call the platform to find the target state for the device.
2331 */
2332 pci_power_t state = platform_pci_choose_state(dev);
2333
2334 switch (state) {
2335 case PCI_POWER_ERROR:
2336 case PCI_UNKNOWN:
2337 break;
2338 case PCI_D1:
2339 case PCI_D2:
2340 if (pci_no_d1d2(dev))
2341 break;
2342 /* else, fall through */
2343 default:
2344 target_state = state;
2345 }
2346
2347 return target_state;
2348 }
2349
2350 if (!dev->pm_cap)
2351 target_state = PCI_D0;
2352
2353 /*
2354 * If the device is in D3cold even though it's not power-manageable by
2355 * the platform, it may have been powered down by non-standard means.
2356 * Best to let it slumber.
2357 */
2358 if (dev->current_state == PCI_D3cold)
2359 target_state = PCI_D3cold;
2360
2361 if (wakeup) {
2362 /*
2363 * Find the deepest state from which the device can generate
2364 * PME#.
2365 */
2366 if (dev->pme_support) {
2367 while (target_state
2368 && !(dev->pme_support & (1 << target_state)))
2369 target_state--;
2370 }
2371 }
2372
2373 return target_state;
2374 }
2375
2376 /**
2377 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2378 * into a sleep state
2379 * @dev: Device to handle.
2380 *
2381 * Choose the power state appropriate for the device depending on whether
2382 * it can wake up the system and/or is power manageable by the platform
2383 * (PCI_D3hot is the default) and put the device into that state.
2384 */
2385 int pci_prepare_to_sleep(struct pci_dev *dev)
2386 {
2387 bool wakeup = device_may_wakeup(&dev->dev);
2388 pci_power_t target_state = pci_target_state(dev, wakeup);
2389 int error;
2390
2391 if (target_state == PCI_POWER_ERROR)
2392 return -EIO;
2393
2394 pci_enable_wake(dev, target_state, wakeup);
2395
2396 error = pci_set_power_state(dev, target_state);
2397
2398 if (error)
2399 pci_enable_wake(dev, target_state, false);
2400
2401 return error;
2402 }
2403 EXPORT_SYMBOL(pci_prepare_to_sleep);
2404
2405 /**
2406 * pci_back_from_sleep - turn PCI device on during system-wide transition
2407 * into working state
2408 * @dev: Device to handle.
2409 *
2410 * Disable device's system wake-up capability and put it into D0.
2411 */
2412 int pci_back_from_sleep(struct pci_dev *dev)
2413 {
2414 pci_enable_wake(dev, PCI_D0, false);
2415 return pci_set_power_state(dev, PCI_D0);
2416 }
2417 EXPORT_SYMBOL(pci_back_from_sleep);
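/*
 * Usage sketch (hypothetical system sleep callbacks): the two helpers
 * above are meant to be used as a pair around a system-wide transition.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return pci_prepare_to_sleep(to_pci_dev(dev));
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return pci_back_from_sleep(to_pci_dev(dev));
 *	}
 */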
2418
2419 /**
2420 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2421 * @dev: PCI device being suspended.
2422 *
2423 * Prepare @dev to generate wake-up events at run time and put it into a low
2424 * power state.
2425 */
2426 int pci_finish_runtime_suspend(struct pci_dev *dev)
2427 {
2428 pci_power_t target_state;
2429 int error;
2430
2431 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2432 if (target_state == PCI_POWER_ERROR)
2433 return -EIO;
2434
2435 dev->runtime_d3cold = target_state == PCI_D3cold;
2436
2437 __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2438
2439 error = pci_set_power_state(dev, target_state);
2440
2441 if (error) {
2442 pci_enable_wake(dev, target_state, false);
2443 dev->runtime_d3cold = false;
2444 }
2445
2446 return error;
2447 }
2448
2449 /**
2450 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2451 * @dev: Device to check.
2452 *
2453 * Return true if the device itself is capable of generating wake-up events
2454 * (through the platform or using the native PCIe PME) or if the device supports
2455 * PME and one of its upstream bridges can generate wake-up events.
2456 */
2457 bool pci_dev_run_wake(struct pci_dev *dev)
2458 {
2459 struct pci_bus *bus = dev->bus;
2460
2461 if (!dev->pme_support)
2462 return false;
2463
2464 /* PME-capable in principle, but not from the target power state */
2465 if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2466 return false;
2467
2468 if (device_can_wakeup(&dev->dev))
2469 return true;
2470
2471 while (bus->parent) {
2472 struct pci_dev *bridge = bus->self;
2473
2474 if (device_can_wakeup(&bridge->dev))
2475 return true;
2476
2477 bus = bus->parent;
2478 }
2479
2480 /* We have reached the root bus. */
2481 if (bus->bridge)
2482 return device_can_wakeup(bus->bridge);
2483
2484 return false;
2485 }
2486 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2487
2488 /**
2489 * pci_dev_need_resume - Check if it is necessary to resume the device.
2490 * @pci_dev: Device to check.
2491 *
2492 * Return 'true' if the device is not runtime-suspended or it has to be
2493 * reconfigured due to a wakeup settings difference between system and runtime
2494 * suspend, or its current power state is not suitable for the upcoming
2495 * (system-wide) transition.
2496 */
2497 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2498 {
2499 struct device *dev = &pci_dev->dev;
2500 pci_power_t target_state;
2501
2502 if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2503 return true;
2504
2505 target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2506
2507 /*
2508 * If the earlier platform check has not triggered, D3cold is just power
2509 * removal on top of D3hot, so no need to resume the device in that
2510 * case.
2511 */
2512 return target_state != pci_dev->current_state &&
2513 target_state != PCI_D3cold &&
2514 pci_dev->current_state != PCI_D3hot;
2515 }
2516
2517 /**
2518 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2519 * @pci_dev: Device to check.
2520 *
2521 * If the device is suspended and it is not configured for system wakeup,
2522 * disable PME for it to prevent it from waking up the system unnecessarily.
2523 *
2524 * Note that if the device's power state is D3cold and the platform check in
2525 * pci_dev_need_resume() has not triggered, the device's configuration need not
2526 * be changed.
2527 */
2528 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2529 {
2530 struct device *dev = &pci_dev->dev;
2531
2532 spin_lock_irq(&dev->power.lock);
2533
2534 if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2535 pci_dev->current_state < PCI_D3cold)
2536 __pci_pme_active(pci_dev, false);
2537
2538 spin_unlock_irq(&dev->power.lock);
2539 }
2540
2541 /**
2542 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2543 * @pci_dev: Device to handle.
2544 *
2545 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2546 * it might have been disabled during the prepare phase of system suspend if
2547 * the device was not configured for system wakeup.
2548 */
2549 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2550 {
2551 struct device *dev = &pci_dev->dev;
2552
2553 if (!pci_dev_run_wake(pci_dev))
2554 return;
2555
2556 spin_lock_irq(&dev->power.lock);
2557
2558 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2559 __pci_pme_active(pci_dev, true);
2560
2561 spin_unlock_irq(&dev->power.lock);
2562 }
2563
2564 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2565 {
2566 struct device *dev = &pdev->dev;
2567 struct device *parent = dev->parent;
2568
2569 if (parent)
2570 pm_runtime_get_sync(parent);
2571 pm_runtime_get_noresume(dev);
2572 /*
2573 * pdev->current_state is set to PCI_D3cold during suspending,
2574 * so wait until suspending completes
2575 */
2576 pm_runtime_barrier(dev);
2577 /*
2578 * Only need to resume devices in D3cold, because config
2579 * registers are still accessible for devices suspended but
2580 * not in D3cold.
2581 */
2582 if (pdev->current_state == PCI_D3cold)
2583 pm_runtime_resume(dev);
2584 }
2585
2586 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2587 {
2588 struct device *dev = &pdev->dev;
2589 struct device *parent = dev->parent;
2590
2591 pm_runtime_put(dev);
2592 if (parent)
2593 pm_runtime_put_sync(parent);
2594 }
2595
2596 static const struct dmi_system_id bridge_d3_blacklist[] = {
2597 #ifdef CONFIG_X86
2598 {
2599 /*
2600 * Gigabyte X299 root port is not marked as hotplug capable
2601 * which allows Linux to power manage it. However, this
2602 * confuses the BIOS SMI handler so don't power manage root
2603 * ports on that system.
2604 */
2605 .ident = "X299 DESIGNARE EX-CF",
2606 .matches = {
2607 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2608 DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2609 },
2610 },
2611 #endif
2612 { }
2613 };
2614
2615 /**
2616 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2617 * @bridge: Bridge to check
2618 *
2619 * This function checks if it is possible to move the bridge to D3.
2620 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
2621 */
2622 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2623 {
2624 if (!pci_is_pcie(bridge))
2625 return false;
2626
2627 switch (pci_pcie_type(bridge)) {
2628 case PCI_EXP_TYPE_ROOT_PORT:
2629 case PCI_EXP_TYPE_UPSTREAM:
2630 case PCI_EXP_TYPE_DOWNSTREAM:
2631 if (pci_bridge_d3_disable)
2632 return false;
2633
2634 /*
2635 * Hotplug ports handled by firmware in System Management Mode
2636 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2637 */
2638 if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
2639 return false;
2640
2641 if (pci_bridge_d3_force)
2642 return true;
2643
2644 /* Even the oldest 2010 Thunderbolt controller supports D3. */
2645 if (bridge->is_thunderbolt)
2646 return true;
2647
2648 /* Platform might know better if the bridge supports D3 */
2649 if (platform_pci_bridge_d3(bridge))
2650 return true;
2651
2652 /*
2653 * Hotplug ports handled natively by the OS were not validated
2654 * by vendors for runtime D3 at least until 2018 because there
2655 * was no OS support.
2656 */
2657 if (bridge->is_hotplug_bridge)
2658 return false;
2659
2660 if (dmi_check_system(bridge_d3_blacklist))
2661 return false;
2662
2663 /*
2664 * It should be safe to put PCIe ports from 2015 or newer
2665 * to D3.
2666 */
2667 if (dmi_get_bios_year() >= 2015)
2668 return true;
2669 break;
2670 }
2671
2672 return false;
2673 }
2674
2675 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2676 {
2677 bool *d3cold_ok = data;
2678
2679 if (/* The device needs to be allowed to go D3cold ... */
2680 dev->no_d3cold || !dev->d3cold_allowed ||
2681
2682 /* ... and if it is wakeup capable to do so from D3cold. */
2683 (device_may_wakeup(&dev->dev) &&
2684 !pci_pme_capable(dev, PCI_D3cold)) ||
2685
2686 /* If it is a bridge it must be allowed to go to D3. */
2687 !pci_power_manageable(dev))
2688
2689 *d3cold_ok = false;
2690
2691 return !*d3cold_ok;
2692 }
2693
2694 /**
2695 * pci_bridge_d3_update - Update bridge D3 capabilities
2696 * @dev: PCI device which is changed
2697 *
2698 * Update upstream bridge PM capabilities depending on whether the
2699 * device PM configuration was changed or the device is being removed. The
2700 * change is also propagated upstream.
2701 */
2702 void pci_bridge_d3_update(struct pci_dev *dev)
2703 {
2704 bool remove = !device_is_registered(&dev->dev);
2705 struct pci_dev *bridge;
2706 bool d3cold_ok = true;
2707
2708 bridge = pci_upstream_bridge(dev);
2709 if (!bridge || !pci_bridge_d3_possible(bridge))
2710 return;
2711
2712 /*
2713 * If D3 is currently allowed for the bridge, removing one of its
2714 * children won't change that.
2715 */
2716 if (remove && bridge->bridge_d3)
2717 return;
2718
2719 /*
2720 * If D3 is currently allowed for the bridge and a child is added or
2721 * changed, disallowance of D3 can only be caused by that child, so
2722 * we only need to check that single device, not any of its siblings.
2723 *
2724 * If D3 is currently not allowed for the bridge, checking the device
2725 * first may allow us to skip checking its siblings.
2726 */
2727 if (!remove)
2728 pci_dev_check_d3cold(dev, &d3cold_ok);
2729
2730 /*
2731 * If D3 is currently not allowed for the bridge, this may be caused
2732 * either by the device being changed/removed or any of its siblings,
2733 * so we need to go through all children to find out if one of them
2734 * continues to block D3.
2735 */
2736 if (d3cold_ok && !bridge->bridge_d3)
2737 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2738 &d3cold_ok);
2739
2740 if (bridge->bridge_d3 != d3cold_ok) {
2741 bridge->bridge_d3 = d3cold_ok;
2742 /* Propagate change to upstream bridges */
2743 pci_bridge_d3_update(bridge);
2744 }
2745 }
2746
2747 /**
2748 * pci_d3cold_enable - Enable D3cold for device
2749 * @dev: PCI device to handle
2750 *
2751 * This function can be used in drivers to enable D3cold for the device
2752 * they handle. It also updates upstream PCI bridge PM capabilities
2753 * accordingly.
2754 */
2755 void pci_d3cold_enable(struct pci_dev *dev)
2756 {
2757 if (dev->no_d3cold) {
2758 dev->no_d3cold = false;
2759 pci_bridge_d3_update(dev);
2760 }
2761 }
2762 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2763
2764 /**
2765 * pci_d3cold_disable - Disable D3cold for device
2766 * @dev: PCI device to handle
2767 *
2768 * This function can be used in drivers to disable D3cold for the device
2769 * they handle. It also updates upstream PCI bridge PM capabilities
2770 * accordingly.
2771 */
2772 void pci_d3cold_disable(struct pci_dev *dev)
2773 {
2774 if (!dev->no_d3cold) {
2775 dev->no_d3cold = true;
2776 pci_bridge_d3_update(dev);
2777 }
2778 }
2779 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
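/*
 * Usage sketch (hypothetical call sites): a driver whose device cannot
 * tolerate having its power removed can veto D3cold in probe() and lift
 * the veto again in remove():
 *
 *	pci_d3cold_disable(pdev);	(in probe)
 *	pci_d3cold_enable(pdev);	(in remove)
 */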
2780
2781 /**
2782 * pci_pm_init - Initialize PM functions of given PCI device
2783 * @dev: PCI device to handle.
2784 */
2785 void pci_pm_init(struct pci_dev *dev)
2786 {
2787 int pm;
2788 u16 status;
2789 u16 pmc;
2790
2791 pm_runtime_forbid(&dev->dev);
2792 pm_runtime_set_active(&dev->dev);
2793 pm_runtime_enable(&dev->dev);
2794 device_enable_async_suspend(&dev->dev);
2795 dev->wakeup_prepared = false;
2796
2797 dev->pm_cap = 0;
2798 dev->pme_support = 0;
2799
2800 /* find PCI PM capability in list */
2801 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2802 if (!pm)
2803 return;
2804 /* Check device's ability to generate PME# */
2805 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2806
2807 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2808 pci_err(dev, "unsupported PM cap regs version (%u)\n",
2809 pmc & PCI_PM_CAP_VER_MASK);
2810 return;
2811 }
2812
2813 dev->pm_cap = pm;
2814 dev->d3_delay = PCI_PM_D3_WAIT;
2815 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
2816 dev->bridge_d3 = pci_bridge_d3_possible(dev);
2817 dev->d3cold_allowed = true;
2818
2819 dev->d1_support = false;
2820 dev->d2_support = false;
2821 if (!pci_no_d1d2(dev)) {
2822 if (pmc & PCI_PM_CAP_D1)
2823 dev->d1_support = true;
2824 if (pmc & PCI_PM_CAP_D2)
2825 dev->d2_support = true;
2826
2827 if (dev->d1_support || dev->d2_support)
2828 pci_info(dev, "supports%s%s\n",
2829 dev->d1_support ? " D1" : "",
2830 dev->d2_support ? " D2" : "");
2831 }
2832
2833 pmc &= PCI_PM_CAP_PME_MASK;
2834 if (pmc) {
2835 pci_info(dev, "PME# supported from%s%s%s%s%s\n",
2836 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2837 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2838 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2839 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2840 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
2841 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
2842 dev->pme_poll = true;
2843 /*
2844 * Make device's PM flags reflect the wake-up capability, but
2845 * let user space enable it to wake up the system as needed.
2846 */
2847 device_set_wakeup_capable(&dev->dev, true);
2848 /* Disable the PME# generation functionality */
2849 pci_pme_active(dev, false);
2850 }
2851
2852 pci_read_config_word(dev, PCI_STATUS, &status);
2853 if (status & PCI_STATUS_IMM_READY)
2854 dev->imm_ready = 1;
2855 }
2856
2857 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
2858 {
2859 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
2860
2861 switch (prop) {
2862 case PCI_EA_P_MEM:
2863 case PCI_EA_P_VF_MEM:
2864 flags |= IORESOURCE_MEM;
2865 break;
2866 case PCI_EA_P_MEM_PREFETCH:
2867 case PCI_EA_P_VF_MEM_PREFETCH:
2868 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
2869 break;
2870 case PCI_EA_P_IO:
2871 flags |= IORESOURCE_IO;
2872 break;
2873 default:
2874 return 0;
2875 }
2876
2877 return flags;
2878 }
2879
2880 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
2881 u8 prop)
2882 {
2883 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
2884 return &dev->resource[bei];
2885 #ifdef CONFIG_PCI_IOV
2886 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
2887 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
2888 return &dev->resource[PCI_IOV_RESOURCES +
2889 bei - PCI_EA_BEI_VF_BAR0];
2890 #endif
2891 else if (bei == PCI_EA_BEI_ROM)
2892 return &dev->resource[PCI_ROM_RESOURCE];
2893 else
2894 return NULL;
2895 }
2896
2897 /* Read an Enhanced Allocation (EA) entry */
2898 static int pci_ea_read(struct pci_dev *dev, int offset)
2899 {
2900 struct resource *res;
2901 int ent_size, ent_offset = offset;
2902 resource_size_t start, end;
2903 unsigned long flags;
2904 u32 dw0, bei, base, max_offset;
2905 u8 prop;
2906 bool support_64 = (sizeof(resource_size_t) >= 8);
2907
2908 pci_read_config_dword(dev, ent_offset, &dw0);
2909 ent_offset += 4;
2910
2911 /* Entry size field indicates DWORDs after 1st */
2912 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
2913
2914 if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
2915 goto out;
2916
2917 bei = (dw0 & PCI_EA_BEI) >> 4;
2918 prop = (dw0 & PCI_EA_PP) >> 8;
2919
2920 /*
2921 * If the Property is in the reserved range, try the Secondary
2922 * Property instead.
2923 */
2924 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
2925 prop = (dw0 & PCI_EA_SP) >> 16;
2926 if (prop > PCI_EA_P_BRIDGE_IO)
2927 goto out;
2928
2929 res = pci_ea_get_resource(dev, bei, prop);
2930 if (!res) {
2931 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
2932 goto out;
2933 }
2934
2935 flags = pci_ea_flags(dev, prop);
2936 if (!flags) {
2937 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
2938 goto out;
2939 }
2940
2941 /* Read Base */
2942 pci_read_config_dword(dev, ent_offset, &base);
2943 start = (base & PCI_EA_FIELD_MASK);
2944 ent_offset += 4;
2945
2946 /* Read MaxOffset */
2947 pci_read_config_dword(dev, ent_offset, &max_offset);
2948 ent_offset += 4;
2949
2950 /* Read Base MSBs (if 64-bit entry) */
2951 if (base & PCI_EA_IS_64) {
2952 u32 base_upper;
2953
2954 pci_read_config_dword(dev, ent_offset, &base_upper);
2955 ent_offset += 4;
2956
2957 flags |= IORESOURCE_MEM_64;
2958
2959 /* entry starts above 32-bit boundary, can't use */
2960 if (!support_64 && base_upper)
2961 goto out;
2962
2963 if (support_64)
2964 start |= ((u64)base_upper << 32);
2965 }
2966
2967 end = start + (max_offset | 0x03);
2968
2969 /* Read MaxOffset MSBs (if 64-bit entry) */
2970 if (max_offset & PCI_EA_IS_64) {
2971 u32 max_offset_upper;
2972
2973 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
2974 ent_offset += 4;
2975
2976 flags |= IORESOURCE_MEM_64;
2977
2978 /* entry too big, can't use */
2979 if (!support_64 && max_offset_upper)
2980 goto out;
2981
2982 if (support_64)
2983 end += ((u64)max_offset_upper << 32);
2984 }
2985
2986 if (end < start) {
2987 pci_err(dev, "EA Entry crosses address boundary\n");
2988 goto out;
2989 }
2990
2991 if (ent_size != ent_offset - offset) {
2992 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
2993 ent_size, ent_offset - offset);
2994 goto out;
2995 }
2996
2997 res->name = pci_name(dev);
2998 res->start = start;
2999 res->end = end;
3000 res->flags = flags;
3001
3002 if (bei <= PCI_EA_BEI_BAR5)
3003 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3004 bei, res, prop);
3005 else if (bei == PCI_EA_BEI_ROM)
3006 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3007 res, prop);
3008 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3009 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3010 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3011 else
3012 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3013 bei, res, prop);
3014
3015 out:
3016 return offset + ent_size;
3017 }
3018
3019 /* Enhanced Allocation Initialization */
3020 void pci_ea_init(struct pci_dev *dev)
3021 {
3022 int ea;
3023 u8 num_ent;
3024 int offset;
3025 int i;
3026
3027 /* find PCI EA capability in list */
3028 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3029 if (!ea)
3030 return;
3031
3032 /* determine the number of entries */
3033 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3034 &num_ent);
3035 num_ent &= PCI_EA_NUM_ENT_MASK;
3036
3037 offset = ea + PCI_EA_FIRST_ENT;
3038
3039 /* Skip DWORD 2 for type 1 functions */
3040 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3041 offset += 4;
3042
3043 /* parse each EA entry */
3044 for (i = 0; i < num_ent; ++i)
3045 offset = pci_ea_read(dev, offset);
3046 }
3047
3048 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3049 struct pci_cap_saved_state *new_cap)
3050 {
3051 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3052 }
3053
3054 /**
3055 * _pci_add_cap_save_buffer - allocate buffer for saving given
3056 * capability registers
3057 * @dev: the PCI device
3058 * @cap: the capability to allocate the buffer for
3059 * @extended: Standard or Extended capability ID
3060 * @size: requested size of the buffer
3061 */
3062 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3063 bool extended, unsigned int size)
3064 {
3065 int pos;
3066 struct pci_cap_saved_state *save_state;
3067
3068 if (extended)
3069 pos = pci_find_ext_capability(dev, cap);
3070 else
3071 pos = pci_find_capability(dev, cap);
3072
3073 if (!pos)
3074 return 0;
3075
3076 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3077 if (!save_state)
3078 return -ENOMEM;
3079
3080 save_state->cap.cap_nr = cap;
3081 save_state->cap.cap_extended = extended;
3082 save_state->cap.size = size;
3083 pci_add_saved_cap(dev, save_state);
3084
3085 return 0;
3086 }
3087
3088 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3089 {
3090 return _pci_add_cap_save_buffer(dev, cap, false, size);
3091 }
3092
3093 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3094 {
3095 return _pci_add_cap_save_buffer(dev, cap, true, size);
3096 }
3097
3098 /**
3099 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3100 * @dev: the PCI device
3101 */
3102 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3103 {
3104 int error;
3105
3106 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3107 PCI_EXP_SAVE_REGS * sizeof(u16));
3108 if (error)
3109 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3110
3111 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3112 if (error)
3113 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3114
3115 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3116 2 * sizeof(u16));
3117 if (error)
3118 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3119
3120 pci_allocate_vc_save_buffers(dev);
3121 }
3122
3123 void pci_free_cap_save_buffers(struct pci_dev *dev)
3124 {
3125 struct pci_cap_saved_state *tmp;
3126 struct hlist_node *n;
3127
3128 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3129 kfree(tmp);
3130 }
3131
3132 /**
3133 * pci_configure_ari - enable or disable ARI forwarding
3134 * @dev: the PCI device
3135 *
3136 * If @dev and its upstream bridge both support ARI, enable ARI in the
3137 * bridge. Otherwise, disable ARI in the bridge.
3138 */
3139 void pci_configure_ari(struct pci_dev *dev)
3140 {
3141 u32 cap;
3142 struct pci_dev *bridge;
3143
3144 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3145 return;
3146
3147 bridge = dev->bus->self;
3148 if (!bridge)
3149 return;
3150
3151 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3152 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3153 return;
3154
3155 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3156 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3157 PCI_EXP_DEVCTL2_ARI);
3158 bridge->ari_enabled = 1;
3159 } else {
3160 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3161 PCI_EXP_DEVCTL2_ARI);
3162 bridge->ari_enabled = 0;
3163 }
3164 }
3165
3166 static int pci_acs_enable;
3167
3168 /**
3169 * pci_request_acs - ask for ACS to be enabled if supported
3170 */
3171 void pci_request_acs(void)
3172 {
3173 pci_acs_enable = 1;
3174 }
3175
3176 static const char *disable_acs_redir_param;
3177
3178 /**
3179 * pci_disable_acs_redir - disable ACS redirect capabilities
3180 * @dev: the PCI device
3181 *
3182 * Only for devices specified in the disable_acs_redir parameter.
3183 */
3184 static void pci_disable_acs_redir(struct pci_dev *dev)
3185 {
3186 int ret = 0;
3187 const char *p;
3188 int pos;
3189 u16 ctrl;
3190
3191 if (!disable_acs_redir_param)
3192 return;
3193
3194 p = disable_acs_redir_param;
3195 while (*p) {
3196 ret = pci_dev_str_match(dev, p, &p);
3197 if (ret < 0) {
3198 pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
3199 disable_acs_redir_param);
3200
3201 break;
3202 } else if (ret == 1) {
3203 /* Found a match */
3204 break;
3205 }
3206
3207 if (*p != ';' && *p != ',') {
3208 /* End of param or invalid format */
3209 break;
3210 }
3211 p++;
3212 }
3213
3214 if (ret != 1)
3215 return;
3216
3217 if (!pci_dev_specific_disable_acs_redir(dev))
3218 return;
3219
3220 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3221 if (!pos) {
3222 pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
3223 return;
3224 }
3225
3226 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
3227
3228 /* P2P Request & Completion Redirect */
3229 ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
3230
3231 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3232
3233 pci_info(dev, "disabled ACS redirect\n");
3234 }
3235
3236 /**
3237 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
3238 * @dev: the PCI device
3239 */
3240 static void pci_std_enable_acs(struct pci_dev *dev)
3241 {
3242 int pos;
3243 u16 cap;
3244 u16 ctrl;
3245
3246 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3247 if (!pos)
3248 return;
3249
3250 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
3251 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
3252
3253 /* Source Validation */
3254 ctrl |= (cap & PCI_ACS_SV);
3255
3256 /* P2P Request Redirect */
3257 ctrl |= (cap & PCI_ACS_RR);
3258
3259 /* P2P Completion Redirect */
3260 ctrl |= (cap & PCI_ACS_CR);
3261
3262 /* Upstream Forwarding */
3263 ctrl |= (cap & PCI_ACS_UF);
3264
3265 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3266 }
3267
3268 /**
3269 * pci_enable_acs - enable ACS if hardware supports it
3270 * @dev: the PCI device
3271 */
3272 void pci_enable_acs(struct pci_dev *dev)
3273 {
3274 if (!pci_acs_enable)
3275 goto disable_acs_redir;
3276
3277 if (!pci_dev_specific_enable_acs(dev))
3278 goto disable_acs_redir;
3279
3280 pci_std_enable_acs(dev);
3281
3282 disable_acs_redir:
3283 /*
3284 * Note: pci_disable_acs_redir() must be called even if ACS was not
3285 * enabled by the kernel because it may have been enabled by
3286 * platform firmware. So if we are told to disable it, we should
3287 * always disable it after setting the kernel's default
3288 * preferences.
3289 */
3290 pci_disable_acs_redir(dev);
3291 }
3292
3293 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3294 {
3295 int pos;
3296 u16 cap, ctrl;
3297
3298 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
3299 if (!pos)
3300 return false;
3301
3302 /*
3303 * Except for egress control, capabilities are either required
3304 * or only required if controllable. Features missing from the
3305 * capability field can therefore be assumed as hard-wired enabled.
3306 */
3307 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3308 acs_flags &= (cap | PCI_ACS_EC);
3309
3310 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3311 return (ctrl & acs_flags) == acs_flags;
3312 }
3313
3314 /**
3315 * pci_acs_enabled - test ACS against required flags for a given device
3316 * @pdev: device to test
3317 * @acs_flags: required PCI ACS flags
3318 *
3319 * Return true if the device supports the provided flags. Automatically
3320 * filters out flags that are not implemented on multifunction devices.
3321 *
3322 * Note that this interface checks the effective ACS capabilities of the
3323 * device rather than the actual capabilities. For instance, most single
3324 * function endpoints are not required to support ACS because they have no
3325 * opportunity for peer-to-peer access. We therefore return 'true'
3326 * regardless of whether the device exposes an ACS capability. This makes
3327 * it much easier for callers of this function to ignore the actual type
3328 * or topology of the device when testing ACS support.
3329 */
3330 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3331 {
3332 int ret;
3333
3334 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3335 if (ret >= 0)
3336 return ret > 0;
3337
3338 /*
3339 * Conventional PCI and PCI-X devices never support ACS, either
3340 * effectively or actually. The shared bus topology implies that
3341 * any device on the bus can receive or snoop DMA.
3342 */
3343 if (!pci_is_pcie(pdev))
3344 return false;
3345
3346 switch (pci_pcie_type(pdev)) {
3347 /*
3348 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3349 * but since their primary interface is PCI/X, we conservatively
3350 * handle them as we would a non-PCIe device.
3351 */
3352 case PCI_EXP_TYPE_PCIE_BRIDGE:
3353 /*
3354 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
3355 * applicable... must never implement an ACS Extended Capability...".
3356 * This seems arbitrary, but we take a conservative interpretation
3357 * of this statement.
3358 */
3359 case PCI_EXP_TYPE_PCI_BRIDGE:
3360 case PCI_EXP_TYPE_RC_EC:
3361 return false;
3362 /*
3363 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3364 * implement ACS in order to indicate their peer-to-peer capabilities,
3365 * regardless of whether they are single- or multi-function devices.
3366 */
3367 case PCI_EXP_TYPE_DOWNSTREAM:
3368 case PCI_EXP_TYPE_ROOT_PORT:
3369 return pci_acs_flags_enabled(pdev, acs_flags);
3370 /*
3371 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3372 * implemented by the remaining PCIe types to indicate peer-to-peer
3373 * capabilities, but only when they are part of a multifunction
3374 * device. The footnote for section 6.12 indicates the specific
3375 * PCIe types included here.
3376 */
3377 case PCI_EXP_TYPE_ENDPOINT:
3378 case PCI_EXP_TYPE_UPSTREAM:
3379 case PCI_EXP_TYPE_LEG_END:
3380 case PCI_EXP_TYPE_RC_END:
3381 if (!pdev->multifunction)
3382 break;
3383
3384 return pci_acs_flags_enabled(pdev, acs_flags);
3385 }
3386
3387 /*
3388 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3389 * to single function devices with the exception of downstream ports.
3390 */
3391 return true;
3392 }
3393
3394 /**
3395 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3396 * @start: starting downstream device
3397 * @end: ending upstream device or NULL to search to the root bus
3398 * @acs_flags: required flags
3399 *
3400 * Walk up a device tree from start to end testing PCI ACS support. If
3401 * any step along the way does not support the required flags, return false.
3402 */
3403 bool pci_acs_path_enabled(struct pci_dev *start,
3404 struct pci_dev *end, u16 acs_flags)
3405 {
3406 struct pci_dev *pdev, *parent = start;
3407
3408 do {
3409 pdev = parent;
3410
3411 if (!pci_acs_enabled(pdev, acs_flags))
3412 return false;
3413
3414 if (pci_is_root_bus(pdev->bus))
3415 return (end == NULL);
3416
3417 parent = pdev->bus->self;
3418 } while (pdev != end);
3419
3420 return true;
3421 }
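/*
 * Usage sketch (hypothetical devices a and b): before allowing
 * peer-to-peer DMA between two endpoints, check that requests from each
 * device are redirected upstream instead of being routed to a peer:
 *
 *	if (!pci_acs_path_enabled(a, NULL, PCI_ACS_RR | PCI_ACS_CR))
 *		return -EPERM;
 */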
3422
3423 /**
3424 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3425 * @pdev: PCI device
3426 * @bar: BAR to find
3427 *
3428 * Helper to find the position of the ctrl register for a BAR.
3429 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3430 * Returns -ENOENT if no ctrl register for the BAR could be found.
3431 */
3432 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3433 {
3434 unsigned int pos, nbars, i;
3435 u32 ctrl;
3436
3437 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3438 if (!pos)
3439 return -ENOTSUPP;
3440
3441 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3442 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3443 PCI_REBAR_CTRL_NBAR_SHIFT;
3444
3445 for (i = 0; i < nbars; i++, pos += 8) {
3446 int bar_idx;
3447
3448 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3449 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3450 if (bar_idx == bar)
3451 return pos;
3452 }
3453
3454 return -ENOENT;
3455 }
3456
3457 /**
3458 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3459 * @pdev: PCI device
3460 * @bar: BAR to query
3461 *
3462 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3463 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3464 */
3465 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3466 {
3467 int pos;
3468 u32 cap;
3469
3470 pos = pci_rebar_find_pos(pdev, bar);
3471 if (pos < 0)
3472 return 0;
3473
3474 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3475 return (cap & PCI_REBAR_CAP_SIZES) >> 4;
3476 }
3477
3478 /**
3479 * pci_rebar_get_current_size - get the current size of a BAR
3480 * @pdev: PCI device
3481 * @bar: BAR to query
3482 *
3483 * Read the size of a BAR from the resizable BAR config.
3484 * Returns size if found or negative error code.
3485 */
3486 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3487 {
3488 int pos;
3489 u32 ctrl;
3490
3491 pos = pci_rebar_find_pos(pdev, bar);
3492 if (pos < 0)
3493 return pos;
3494
3495 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3496 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3497 }
3498
3499 /**
3500 * pci_rebar_set_size - set a new size for a BAR
3501 * @pdev: PCI device
3502 * @bar: BAR to set size to
3503 * @size: new size as defined in the spec (0=1MB, 19=512GB)
3504 *
3505 * Set the new size of a BAR as defined in the spec.
3506 * Returns zero if resizing was successful, error code otherwise.
3507 */
3508 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3509 {
3510 int pos;
3511 u32 ctrl;
3512
3513 pos = pci_rebar_find_pos(pdev, bar);
3514 if (pos < 0)
3515 return pos;
3516
3517 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3518 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3519 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3520 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3521 return 0;
3522 }
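/*
 * Sketch of the intended flow (these helpers are internal to the PCI
 * core; the BAR index and size below are illustrative). Size bit n in
 * the capability corresponds to 2^n MB, so bit 8 means 256MB:
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);
 *
 *	if (sizes & BIT(8))
 *		pci_rebar_set_size(pdev, 0, 8);
 */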
3523
3524 /**
3525 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3526 * @dev: the PCI device
3527 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3528 * PCI_EXP_DEVCAP2_ATOMIC_COMP32
3529 * PCI_EXP_DEVCAP2_ATOMIC_COMP64
3530 * PCI_EXP_DEVCAP2_ATOMIC_COMP128
3531 *
3532 * Return 0 if all upstream bridges support AtomicOp routing, egress
3533 * blocking is disabled on all upstream ports, and the root port supports
3534 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3535 * AtomicOp completion), or negative otherwise.
3536 */
3537 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3538 {
3539 struct pci_bus *bus = dev->bus;
3540 struct pci_dev *bridge;
3541 u32 cap, ctl2;
3542
3543 if (!pci_is_pcie(dev))
3544 return -EINVAL;
3545
3546 /*
3547 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3548 * AtomicOp requesters. For now, we only support endpoints as
3549 * requesters and root ports as completers. No endpoints as
3550 * completers, and no peer-to-peer.
3551 */
3552
3553 switch (pci_pcie_type(dev)) {
3554 case PCI_EXP_TYPE_ENDPOINT:
3555 case PCI_EXP_TYPE_LEG_END:
3556 case PCI_EXP_TYPE_RC_END:
3557 break;
3558 default:
3559 return -EINVAL;
3560 }
3561
3562 while (bus->parent) {
3563 bridge = bus->self;
3564
3565 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3566
3567 switch (pci_pcie_type(bridge)) {
3568 /* Ensure switch ports support AtomicOp routing */
3569 case PCI_EXP_TYPE_UPSTREAM:
3570 case PCI_EXP_TYPE_DOWNSTREAM:
3571 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3572 return -EINVAL;
3573 break;
3574
3575 /* Ensure root port supports all the sizes we care about */
3576 case PCI_EXP_TYPE_ROOT_PORT:
3577 if ((cap & cap_mask) != cap_mask)
3578 return -EINVAL;
3579 break;
3580 }
3581
3582 /* Ensure upstream ports don't block AtomicOps on egress */
3583 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3584 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3585 &ctl2);
3586 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3587 return -EINVAL;
3588 }
3589
3590 bus = bus->parent;
3591 }
3592
3593 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3594 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3595 return 0;
3596 }
3597 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
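
/*
 * Illustrative sketch (hypothetical probe code): a device that issues
 * 64-bit AtomicOps to host memory could enable requests and fall back
 * to a non-atomic mode if the path to the Root Port does not qualify:
 *
 *	if (pci_enable_atomic_ops_to_root(pdev,
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		dev_info(&pdev->dev, "PCIe AtomicOps not supported\n");
 */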
3598
3599 /**
3600 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3601 * @dev: the PCI device
3602 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3603 *
3604 * Perform INTx swizzling for a device behind one level of bridge. This is
3605 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3606 * behind bridges on add-in cards. For devices with ARI enabled, the slot
3607 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3608 * the PCI Express Base Specification, Revision 2.1)
3609 */
3610 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3611 {
3612 int slot;
3613
3614 if (pci_ari_enabled(dev->bus))
3615 slot = 0;
3616 else
3617 slot = PCI_SLOT(dev->devfn);
3618
3619 return (((pin - 1) + slot) % 4) + 1;
3620 }
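
/*
 * Worked example: INTB (pin 2) from a device in slot 3 behind a single
 * bridge maps to (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA on the primary
 * side of that bridge.
 */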
3621
3622 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3623 {
3624 u8 pin;
3625
3626 pin = dev->pin;
3627 if (!pin)
3628 return -1;
3629
3630 while (!pci_is_root_bus(dev->bus)) {
3631 pin = pci_swizzle_interrupt_pin(dev, pin);
3632 dev = dev->bus->self;
3633 }
3634 *bridge = dev;
3635 return pin;
3636 }
3637
3638 /**
3639 * pci_common_swizzle - swizzle INTx all the way to root bridge
3640 * @dev: the PCI device
3641 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3642 *
3643 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
3644 * bridges all the way up to a PCI root bus.
3645 */
3646 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3647 {
3648 u8 pin = *pinp;
3649
3650 while (!pci_is_root_bus(dev->bus)) {
3651 pin = pci_swizzle_interrupt_pin(dev, pin);
3652 dev = dev->bus->self;
3653 }
3654 *pinp = pin;
3655 return PCI_SLOT(dev->devfn);
3656 }
3657 EXPORT_SYMBOL_GPL(pci_common_swizzle);
3658
3659 /**
3660 * pci_release_region - Release a PCI bar
3661 * @pdev: PCI device whose resources were previously reserved by
3662 * pci_request_region()
3663 * @bar: BAR to release
3664 *
3665 * Releases the PCI I/O and memory resources previously reserved by a
3666 * successful call to pci_request_region(). Call this function only
3667 * after all use of the PCI regions has ceased.
3668 */
3669 void pci_release_region(struct pci_dev *pdev, int bar)
3670 {
3671 struct pci_devres *dr;
3672
3673 if (pci_resource_len(pdev, bar) == 0)
3674 return;
3675 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3676 release_region(pci_resource_start(pdev, bar),
3677 pci_resource_len(pdev, bar));
3678 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3679 release_mem_region(pci_resource_start(pdev, bar),
3680 pci_resource_len(pdev, bar));
3681
3682 dr = find_pci_dr(pdev);
3683 if (dr)
3684 dr->region_mask &= ~(1 << bar);
3685 }
3686 EXPORT_SYMBOL(pci_release_region);
3687
3688 /**
3689 * __pci_request_region - Reserve PCI I/O and memory resource
3690 * @pdev: PCI device whose resources are to be reserved
3691 * @bar: BAR to be reserved
3692 * @res_name: Name to be associated with resource.
3693 * @exclusive: whether the region access is exclusive or not
3694 *
3695 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3696 * being reserved by owner @res_name. Do not access any
3697 * address inside the PCI regions unless this call returns
3698 * successfully.
3699 *
3700 * If @exclusive is set, then the region is marked so that userspace
3701 * is explicitly not allowed to map the resource via /dev/mem or
3702 * sysfs MMIO access.
3703 *
3704 * Returns 0 on success, or %EBUSY on error. A warning
3705 * message is also printed on failure.
3706 */
3707 static int __pci_request_region(struct pci_dev *pdev, int bar,
3708 const char *res_name, int exclusive)
3709 {
3710 struct pci_devres *dr;
3711
3712 if (pci_resource_len(pdev, bar) == 0)
3713 return 0;
3714
3715 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3716 if (!request_region(pci_resource_start(pdev, bar),
3717 pci_resource_len(pdev, bar), res_name))
3718 goto err_out;
3719 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3720 if (!__request_mem_region(pci_resource_start(pdev, bar),
3721 pci_resource_len(pdev, bar), res_name,
3722 exclusive))
3723 goto err_out;
3724 }
3725
3726 dr = find_pci_dr(pdev);
3727 if (dr)
3728 dr->region_mask |= 1 << bar;
3729
3730 return 0;
3731
3732 err_out:
3733 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3734 &pdev->resource[bar]);
3735 return -EBUSY;
3736 }
3737
3738 /**
3739 * pci_request_region - Reserve PCI I/O and memory resource
3740 * @pdev: PCI device whose resources are to be reserved
3741 * @bar: BAR to be reserved
3742 * @res_name: Name to be associated with resource
3743 *
3744 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3745 * being reserved by owner @res_name. Do not access any
3746 * address inside the PCI regions unless this call returns
3747 * successfully.
3748 *
3749 * Returns 0 on success, or %EBUSY on error. A warning
3750 * message is also printed on failure.
3751 */
3752 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3753 {
3754 return __pci_request_region(pdev, bar, res_name, 0);
3755 }
3756 EXPORT_SYMBOL(pci_request_region);
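
/*
 * Illustrative sketch (hypothetical probe path; the name "my_driver"
 * and the variables are placeholders): request BAR 0 before mapping it,
 * and release it again on the error and remove paths:
 *
 *	err = pci_request_region(pdev, 0, "my_driver");
 *	if (err)
 *		return err;
 *
 *	regs = pci_iomap(pdev, 0, 0);
 *	if (!regs) {
 *		pci_release_region(pdev, 0);
 *		return -ENOMEM;
 *	}
 */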
3757
3758 /**
3759 * pci_release_selected_regions - Release selected PCI I/O and memory resources
3760 * @pdev: PCI device whose resources were previously reserved
3761 * @bars: Bitmask of BARs to be released
3762 *
3763 * Release selected PCI I/O and memory resources previously reserved.
3764 * Call this function only after all use of the PCI regions has ceased.
3765 */
3766 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3767 {
3768 int i;
3769
3770 for (i = 0; i < 6; i++)
3771 if (bars & (1 << i))
3772 pci_release_region(pdev, i);
3773 }
3774 EXPORT_SYMBOL(pci_release_selected_regions);
3775
3776 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3777 const char *res_name, int excl)
3778 {
3779 int i;
3780
3781 for (i = 0; i < 6; i++)
3782 if (bars & (1 << i))
3783 if (__pci_request_region(pdev, i, res_name, excl))
3784 goto err_out;
3785 return 0;
3786
3787 err_out:
3788 while (--i >= 0)
3789 if (bars & (1 << i))
3790 pci_release_region(pdev, i);
3791
3792 return -EBUSY;
3793 }
3794
3796 /**
3797 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3798 * @pdev: PCI device whose resources are to be reserved
3799 * @bars: Bitmask of BARs to be requested
3800 * @res_name: Name to be associated with resource
3801 */
3802 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3803 const char *res_name)
3804 {
3805 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3806 }
3807 EXPORT_SYMBOL(pci_request_selected_regions);
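
/*
 * Illustrative sketch: a driver that only touches BARs 0 and 2 can
 * request just those and leave the remaining BARs available (the mask
 * below is hypothetical):
 *
 *	err = pci_request_selected_regions(pdev, (1 << 0) | (1 << 2),
 *					   "my_driver");
 */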
3808
3809 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3810 const char *res_name)
3811 {
3812 return __pci_request_selected_regions(pdev, bars, res_name,
3813 IORESOURCE_EXCLUSIVE);
3814 }
3815 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3816
3817 /**
3818 * pci_release_regions - Release reserved PCI I/O and memory resources
3819 * @pdev: PCI device whose resources were previously reserved by
3820 * pci_request_regions()
3821 *
3822 * Releases all PCI I/O and memory resources previously reserved by a
3823 * successful call to pci_request_regions(). Call this function only
3824 * after all use of the PCI regions has ceased.
3825 */
3827 void pci_release_regions(struct pci_dev *pdev)
3828 {
3829 pci_release_selected_regions(pdev, (1 << 6) - 1);
3830 }
3831 EXPORT_SYMBOL(pci_release_regions);
3832
3833 /**
3834 * pci_request_regions - Reserve PCI I/O and memory resources
3835 * @pdev: PCI device whose resources are to be reserved
3836 * @res_name: Name to be associated with resource.
3837 *
3838 * Mark all PCI regions associated with PCI device @pdev as
3839 * being reserved by owner @res_name. Do not access any
3840 * address inside the PCI regions unless this call returns
3841 * successfully.
3842 *
3843 * Returns 0 on success, or %EBUSY on error. A warning
3844 * message is also printed on failure.
3845 */
3846 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3847 {
3848 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
3849 }
3850 EXPORT_SYMBOL(pci_request_regions);
3851
3852 /**
3853 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3854 * @pdev: PCI device whose resources are to be reserved
3855 * @res_name: Name to be associated with resource.
3856 *
3857 * Mark all PCI regions associated with PCI device @pdev as being reserved
3858 * by owner @res_name. Do not access any address inside the PCI regions
3859 * unless this call returns successfully.
3860 *
3861 * pci_request_regions_exclusive() will mark the region so that /dev/mem
3862 * and the sysfs MMIO access will not be allowed.
3863 *
3864 * Returns 0 on success, or %EBUSY on error. A warning message is also
3865 * printed on failure.
3866 */
3867 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3868 {
3869 return pci_request_selected_regions_exclusive(pdev,
3870 ((1 << 6) - 1), res_name);
3871 }
3872 EXPORT_SYMBOL(pci_request_regions_exclusive);
3873
3874 /*
3875 * Record the PCI IO range (expressed as CPU physical address + size).
3876 * Return a negative value if an error has occurred, zero otherwise
3877 */
3878 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
3879 resource_size_t size)
3880 {
3881 int ret = 0;
3882 #ifdef PCI_IOBASE
3883 struct logic_pio_hwaddr *range;
3884
3885 if (!size || addr + size < addr)
3886 return -EINVAL;
3887
3888 range = kzalloc(sizeof(*range), GFP_ATOMIC);
3889 if (!range)
3890 return -ENOMEM;
3891
3892 range->fwnode = fwnode;
3893 range->size = size;
3894 range->hw_start = addr;
3895 range->flags = LOGIC_PIO_CPU_MMIO;
3896
3897 ret = logic_pio_register_range(range);
3898 if (ret)
3899 kfree(range);
3900 #endif
3901
3902 return ret;
3903 }
3904
3905 phys_addr_t pci_pio_to_address(unsigned long pio)
3906 {
3907 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3908
3909 #ifdef PCI_IOBASE
3910 if (pio >= MMIO_UPPER_LIMIT)
3911 return address;
3912
3913 address = logic_pio_to_hwaddr(pio);
3914 #endif
3915
3916 return address;
3917 }
3918
3919 unsigned long __weak pci_address_to_pio(phys_addr_t address)
3920 {
3921 #ifdef PCI_IOBASE
3922 return logic_pio_trans_cpuaddr(address);
3923 #else
3924 if (address > IO_SPACE_LIMIT)
3925 return (unsigned long)-1;
3926
3927 return (unsigned long) address;
3928 #endif
3929 }
3930
3931 /**
3932 * pci_remap_iospace - Remap the memory mapped I/O space
3933 * @res: Resource describing the I/O space
3934 * @phys_addr: physical address of range to be mapped
3935 *
3936 * Remap the memory mapped I/O space described by the @res and the CPU
3937 * physical address @phys_addr into virtual address space. Only
3938 * architectures that have memory mapped IO functions defined (and the
3939 * PCI_IOBASE value defined) should call this function.
3940 */
3941 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
3942 {
3943 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3944 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3945
3946 if (!(res->flags & IORESOURCE_IO))
3947 return -EINVAL;
3948
3949 if (res->end > IO_SPACE_LIMIT)
3950 return -EINVAL;
3951
3952 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3953 pgprot_device(PAGE_KERNEL));
3954 #else
3955 /*
3956 * This architecture does not have memory mapped I/O space,
3957 * so this function should never be called
3958 */
3959 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3960 return -ENODEV;
3961 #endif
3962 }
3963 EXPORT_SYMBOL(pci_remap_iospace);
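
/*
 * Illustrative sketch (hypothetical host-bridge driver): after parsing
 * an I/O window into @res, map it into the PCI_IOBASE virtual area so
 * that inb()/outb() reach the controller:
 *
 *	err = pci_remap_iospace(res, pci_pio_to_address(res->start));
 *	if (err)
 *		dev_warn(dev, "error %d mapping IO %pR\n", err, res);
 */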
3964
3965 /**
3966 * pci_unmap_iospace - Unmap the memory mapped I/O space
3967 * @res: resource to be unmapped
3968 *
3969 * Unmap the CPU virtual address @res from virtual address space. Only
3970 * architectures that have memory mapped IO functions defined (and the
3971 * PCI_IOBASE value defined) should call this function.
3972 */
3973 void pci_unmap_iospace(struct resource *res)
3974 {
3975 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3976 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3977
3978 unmap_kernel_range(vaddr, resource_size(res));
3979 #endif
3980 }
3981 EXPORT_SYMBOL(pci_unmap_iospace);
3982
3983 static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
3984 {
3985 struct resource **res = ptr;
3986
3987 pci_unmap_iospace(*res);
3988 }
3989
3990 /**
3991 * devm_pci_remap_iospace - Managed pci_remap_iospace()
3992 * @dev: Generic device to remap IO address for
3993 * @res: Resource describing the I/O space
3994 * @phys_addr: physical address of range to be mapped
3995 *
3996 * Managed pci_remap_iospace(). Map is automatically unmapped on driver
3997 * detach.
3998 */
3999 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4000 phys_addr_t phys_addr)
4001 {
4002 const struct resource **ptr;
4003 int error;
4004
4005 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4006 if (!ptr)
4007 return -ENOMEM;
4008
4009 error = pci_remap_iospace(res, phys_addr);
4010 if (error) {
4011 devres_free(ptr);
4012 } else {
4013 *ptr = res;
4014 devres_add(dev, ptr);
4015 }
4016
4017 return error;
4018 }
4019 EXPORT_SYMBOL(devm_pci_remap_iospace);
4020
4021 /**
4022 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4023 * @dev: Generic device to remap IO address for
4024 * @offset: Resource address to map
4025 * @size: Size of map
4026 *
4027 * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
4028 * detach.
4029 */
4030 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4031 resource_size_t offset,
4032 resource_size_t size)
4033 {
4034 void __iomem **ptr, *addr;
4035
4036 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4037 if (!ptr)
4038 return NULL;
4039
4040 addr = pci_remap_cfgspace(offset, size);
4041 if (addr) {
4042 *ptr = addr;
4043 devres_add(dev, ptr);
4044 } else
4045 devres_free(ptr);
4046
4047 return addr;
4048 }
4049 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4050
4051 /**
4052 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4053 * @dev: generic device to handle the resource for
4054 * @res: configuration space resource to be handled
4055 *
4056 * Checks that a resource is a valid memory region, requests the memory
4057 * region and ioremaps with pci_remap_cfgspace() API that ensures the
4058 * proper PCI configuration space memory attributes are guaranteed.
4059 *
4060 * All operations are managed and will be undone on driver detach.
4061 *
4062 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
4063 * on failure. Usage example::
4064 *
4065 * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4066 * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4067 * if (IS_ERR(base))
4068 * return PTR_ERR(base);
4069 */
4070 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4071 struct resource *res)
4072 {
4073 resource_size_t size;
4074 const char *name;
4075 void __iomem *dest_ptr;
4076
4077 BUG_ON(!dev);
4078
4079 if (!res || resource_type(res) != IORESOURCE_MEM) {
4080 dev_err(dev, "invalid resource\n");
4081 return IOMEM_ERR_PTR(-EINVAL);
4082 }
4083
4084 size = resource_size(res);
4085 name = res->name ?: dev_name(dev);
4086
4087 if (!devm_request_mem_region(dev, res->start, size, name)) {
4088 dev_err(dev, "can't request region for resource %pR\n", res);
4089 return IOMEM_ERR_PTR(-EBUSY);
4090 }
4091
4092 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4093 if (!dest_ptr) {
4094 dev_err(dev, "ioremap failed for resource %pR\n", res);
4095 devm_release_mem_region(dev, res->start, size);
4096 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4097 }
4098
4099 return dest_ptr;
4100 }
4101 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4102
4103 static void __pci_set_master(struct pci_dev *dev, bool enable)
4104 {
4105 u16 old_cmd, cmd;
4106
4107 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4108 if (enable)
4109 cmd = old_cmd | PCI_COMMAND_MASTER;
4110 else
4111 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4112 if (cmd != old_cmd) {
4113 pci_dbg(dev, "%s bus mastering\n",
4114 enable ? "enabling" : "disabling");
4115 pci_write_config_word(dev, PCI_COMMAND, cmd);
4116 }
4117 dev->is_busmaster = enable;
4118 }
4119
4120 /**
4121 * pcibios_setup - process "pci=" kernel boot arguments
4122 * @str: string used to pass in "pci=" kernel boot arguments
4123 *
4124 * Process kernel boot arguments. This is the default implementation.
4125 * Architecture specific implementations can override this as necessary.
4126 */
4127 char * __weak __init pcibios_setup(char *str)
4128 {
4129 return str;
4130 }
4131
4132 /**
4133 * pcibios_set_master - enable PCI bus-mastering for device dev
4134 * @dev: the PCI device to enable
4135 *
4136 * Enables PCI bus-mastering for the device. This is the default
4137 * implementation. Architecture specific implementations can override
4138 * this if necessary.
4139 */
4140 void __weak pcibios_set_master(struct pci_dev *dev)
4141 {
4142 u8 lat;
4143
4144 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4145 if (pci_is_pcie(dev))
4146 return;
4147
4148 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4149 if (lat < 16)
4150 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4151 else if (lat > pcibios_max_latency)
4152 lat = pcibios_max_latency;
4153 else
4154 return;
4155
4156 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4157 }
4158
4159 /**
4160 * pci_set_master - enables bus-mastering for device dev
4161 * @dev: the PCI device to enable
4162 *
4163 * Enables bus-mastering on the device and calls pcibios_set_master()
4164 * to do the needed arch specific settings.
4165 */
4166 void pci_set_master(struct pci_dev *dev)
4167 {
4168 __pci_set_master(dev, true);
4169 pcibios_set_master(dev);
4170 }
4171 EXPORT_SYMBOL(pci_set_master);
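
/*
 * Illustrative sketch: bus mastering must be on before the device can
 * DMA (including MSI/MSI-X), so a typical probe path does:
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 */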
4172
4173 /**
4174 * pci_clear_master - disables bus-mastering for device dev
4175 * @dev: the PCI device to disable
4176 */
4177 void pci_clear_master(struct pci_dev *dev)
4178 {
4179 __pci_set_master(dev, false);
4180 }
4181 EXPORT_SYMBOL(pci_clear_master);
4182
4183 /**
4184 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4185 * @dev: the PCI device for which MWI is to be enabled
4186 *
4187 * Helper function for pci_set_mwi.
4188 * Originally copied from drivers/net/acenic.c.
4189 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4190 *
4191 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4192 */
4193 int pci_set_cacheline_size(struct pci_dev *dev)
4194 {
4195 u8 cacheline_size;
4196
4197 if (!pci_cache_line_size)
4198 return -EINVAL;
4199
4200 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4201 equal to or a multiple of the right value. */
4202 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4203 if (cacheline_size >= pci_cache_line_size &&
4204 (cacheline_size % pci_cache_line_size) == 0)
4205 return 0;
4206
4207 /* Write the correct value. */
4208 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4209 /* Read it back. */
4210 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4211 if (cacheline_size == pci_cache_line_size)
4212 return 0;
4213
4214 pci_info(dev, "cache line size of %d is not supported\n",
4215 pci_cache_line_size << 2);
4216
4217 return -EINVAL;
4218 }
4219 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4220
4221 /**
4222 * pci_set_mwi - enables memory-write-invalidate PCI transaction
4223 * @dev: the PCI device for which MWI is enabled
4224 *
4225 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4226 *
4227 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4228 */
4229 int pci_set_mwi(struct pci_dev *dev)
4230 {
4231 #ifdef PCI_DISABLE_MWI
4232 return 0;
4233 #else
4234 int rc;
4235 u16 cmd;
4236
4237 rc = pci_set_cacheline_size(dev);
4238 if (rc)
4239 return rc;
4240
4241 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4242 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4243 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4244 cmd |= PCI_COMMAND_INVALIDATE;
4245 pci_write_config_word(dev, PCI_COMMAND, cmd);
4246 }
4247 return 0;
4248 #endif
4249 }
4250 EXPORT_SYMBOL(pci_set_mwi);
4251
4252 /**
4253 * pcim_set_mwi - a device-managed pci_set_mwi()
4254 * @dev: the PCI device for which MWI is enabled
4255 *
4256 * Managed pci_set_mwi().
4257 *
4258 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4259 */
4260 int pcim_set_mwi(struct pci_dev *dev)
4261 {
4262 struct pci_devres *dr;
4263
4264 dr = find_pci_dr(dev);
4265 if (!dr)
4266 return -ENOMEM;
4267
4268 dr->mwi = 1;
4269 return pci_set_mwi(dev);
4270 }
4271 EXPORT_SYMBOL(pcim_set_mwi);
4272
4273 /**
4274 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4275 * @dev: the PCI device for which MWI is enabled
4276 *
4277 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4278 * Callers are not required to check the return value.
4279 *
4280 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4281 */
4282 int pci_try_set_mwi(struct pci_dev *dev)
4283 {
4284 #ifdef PCI_DISABLE_MWI
4285 return 0;
4286 #else
4287 return pci_set_mwi(dev);
4288 #endif
4289 }
4290 EXPORT_SYMBOL(pci_try_set_mwi);
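
/*
 * Illustrative sketch: because MWI is purely an optimization, callers
 * typically use the _try_ variant and ignore the result:
 *
 *	pci_try_set_mwi(pdev);
 *
 * Failure is harmless here, so no error handling is needed.
 */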
4291
4292 /**
4293 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4294 * @dev: the PCI device to disable
4295 *
4296 * Disables PCI Memory-Write-Invalidate transaction on the device
4297 */
4298 void pci_clear_mwi(struct pci_dev *dev)
4299 {
4300 #ifndef PCI_DISABLE_MWI
4301 u16 cmd;
4302
4303 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4304 if (cmd & PCI_COMMAND_INVALIDATE) {
4305 cmd &= ~PCI_COMMAND_INVALIDATE;
4306 pci_write_config_word(dev, PCI_COMMAND, cmd);
4307 }
4308 #endif
4309 }
4310 EXPORT_SYMBOL(pci_clear_mwi);
4311
4312 /**
4313 * pci_intx - enables/disables PCI INTx for device dev
4314 * @pdev: the PCI device to operate on
4315 * @enable: boolean: whether to enable or disable PCI INTx
4316 *
4317 * Enables/disables PCI INTx for device @pdev
4318 */
4319 void pci_intx(struct pci_dev *pdev, int enable)
4320 {
4321 u16 pci_command, new;
4322
4323 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4324
4325 if (enable)
4326 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4327 else
4328 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4329
4330 if (new != pci_command) {
4331 struct pci_devres *dr;
4332
4333 pci_write_config_word(pdev, PCI_COMMAND, new);
4334
4335 dr = find_pci_dr(pdev);
4336 if (dr && !dr->restore_intx) {
4337 dr->restore_intx = 1;
4338 dr->orig_intx = !enable;
4339 }
4340 }
4341 }
4342 EXPORT_SYMBOL_GPL(pci_intx);
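
/*
 * Illustrative sketch: the MSI core uses this helper (via
 * pci_intx_for_msi()) to keep legacy INTx deasserted while MSI is in
 * use; a driver managing INTx by hand could likewise do:
 *
 *	pci_intx(pdev, 0);
 */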
4343
4344 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4345 {
4346 struct pci_bus *bus = dev->bus;
4347 bool mask_updated = true;
4348 u32 cmd_status_dword;
4349 u16 origcmd, newcmd;
4350 unsigned long flags;
4351 bool irq_pending;
4352
4353 /*
4354 * We do a single dword read to retrieve both command and status.
4355 * Document assumptions that make this possible.
4356 */
4357 BUILD_BUG_ON(PCI_COMMAND % 4);
4358 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4359
4360 raw_spin_lock_irqsave(&pci_lock, flags);
4361
4362 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4363
4364 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4365
4366 /*
4367 * Check interrupt status register to see whether our device
4368 * triggered the interrupt (when masking) or the next IRQ is
4369 * already pending (when unmasking).
4370 */
4371 if (mask != irq_pending) {
4372 mask_updated = false;
4373 goto done;
4374 }
4375
4376 origcmd = cmd_status_dword;
4377 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4378 if (mask)
4379 newcmd |= PCI_COMMAND_INTX_DISABLE;
4380 if (newcmd != origcmd)
4381 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4382
4383 done:
4384 raw_spin_unlock_irqrestore(&pci_lock, flags);
4385
4386 return mask_updated;
4387 }
4388
4389 /**
4390 * pci_check_and_mask_intx - mask INTx on pending interrupt
4391 * @dev: the PCI device to operate on
4392 *
4393 * Check if the device dev has its INTx line asserted, mask it and return
4394 * true in that case. False is returned if no interrupt was pending.
4395 */
4396 bool pci_check_and_mask_intx(struct pci_dev *dev)
4397 {
4398 return pci_check_and_set_intx_mask(dev, true);
4399 }
4400 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4401
4402 /**
4403 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4404 * @dev: the PCI device to operate on
4405 *
4406 * Check if the device dev has its INTx line asserted, unmask it if not and
4407 * return true. False is returned and the mask remains active if there was
4408 * still an interrupt pending.
4409 */
4410 bool pci_check_and_unmask_intx(struct pci_dev *dev)
4411 {
4412 return pci_check_and_set_intx_mask(dev, false);
4413 }
4414 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
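
/*
 * Illustrative sketch (hypothetical handler, in the style of
 * uio_pci_generic): these helpers let a driver without a
 * device-specific interrupt status register share a level-triggered
 * INTx line:
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct pci_dev *pdev = data;
 *
 *		if (!pci_check_and_mask_intx(pdev))
 *			return IRQ_NONE;
 *
 *		(handle the device, then re-enable the line later
 *		 with pci_check_and_unmask_intx())
 *		return IRQ_HANDLED;
 *	}
 */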
4415
4416 /**
4417 * pci_wait_for_pending_transaction - wait for pending transaction
4418 * @dev: the PCI device to operate on
4419 *
4420 * Return 0 if transaction is pending, 1 otherwise.
4421 */
4422 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4423 {
4424 if (!pci_is_pcie(dev))
4425 return 1;
4426
4427 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4428 PCI_EXP_DEVSTA_TRPND);
4429 }
4430 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4431
4432 static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
4433 {
4434 int delay = 1;
4435 u32 id;
4436
4437 /*
4438 * After reset, the device should not silently discard config
4439 * requests, but it may still indicate that it needs more time by
4440 * responding to them with CRS completions. The Root Port will
4441 * generally synthesize ~0 data to complete the read (except when
4442 * CRS SV is enabled and the read was for the Vendor ID; in that
4443 * case it synthesizes 0x0001 data).
4444 *
4445 * Wait for the device to return a non-CRS completion. Read the
4446 * Command register instead of Vendor ID so we don't have to
4447 * contend with the CRS SV value.
4448 */
4449 pci_read_config_dword(dev, PCI_COMMAND, &id);
4450 while (id == ~0) {
4451 if (delay > timeout) {
4452 pci_warn(dev, "not ready %dms after %s; giving up\n",
4453 delay - 1, reset_type);
4454 return -ENOTTY;
4455 }
4456
4457 if (delay > 1000)
4458 pci_info(dev, "not ready %dms after %s; waiting\n",
4459 delay - 1, reset_type);
4460
4461 msleep(delay);
4462 delay *= 2;
4463 pci_read_config_dword(dev, PCI_COMMAND, &id);
4464 }
4465
4466 if (delay > 1000)
4467 pci_info(dev, "ready %dms after %s\n", delay - 1,
4468 reset_type);
4469
4470 return 0;
4471 }
4472
4473 /**
4474 * pcie_has_flr - check if a device supports function level resets
4475 * @dev: device to check
4476 *
4477 * Returns true if the device advertises support for PCIe function level
4478 * resets.
4479 */
4480 bool pcie_has_flr(struct pci_dev *dev)
4481 {
4482 u32 cap;
4483
4484 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4485 return false;
4486
4487 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4488 return cap & PCI_EXP_DEVCAP_FLR;
4489 }
4490 EXPORT_SYMBOL_GPL(pcie_has_flr);
4491
4492 /**
4493 * pcie_flr - initiate a PCIe function level reset
4494 * @dev: device to reset
4495 *
4496 * Initiate a function level reset on @dev. The caller should ensure the
4497 * device supports FLR before calling this function, e.g. by using the
4498 * pcie_has_flr() helper.
4499 */
4500 int pcie_flr(struct pci_dev *dev)
4501 {
4502 if (!pci_wait_for_pending_transaction(dev))
4503 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4504
4505 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4506
4507 if (dev->imm_ready)
4508 return 0;
4509
4510 /*
4511 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4512 * 100ms, but may silently discard requests while the FLR is in
4513 * progress. Wait 100ms before trying to access the device.
4514 */
4515 msleep(100);
4516
4517 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4518 }
4519 EXPORT_SYMBOL_GPL(pcie_flr);
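
/*
 * Illustrative sketch: callers are expected to pair the two helpers:
 *
 *	if (pcie_has_flr(pdev))
 *		pcie_flr(pdev);
 */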
4520
4521 static int pci_af_flr(struct pci_dev *dev, int probe)
4522 {
4523 int pos;
4524 u8 cap;
4525
4526 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4527 if (!pos)
4528 return -ENOTTY;
4529
4530 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4531 return -ENOTTY;
4532
4533 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4534 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4535 return -ENOTTY;
4536
4537 if (probe)
4538 return 0;
4539
4540 /*
4541 * Wait for Transaction Pending bit to clear. A word-aligned test
4542 * is used, so we use the control offset rather than status and shift
4543 * the test bit to match.
4544 */
4545 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4546 PCI_AF_STATUS_TP << 8))
4547 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4548
4549 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4550
4551 if (dev->imm_ready)
4552 return 0;
4553
4554 /*
4555 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4556 * updated 27 July 2006; a device must complete an FLR within
4557 * 100ms, but may silently discard requests while the FLR is in
4558 * progress. Wait 100ms before trying to access the device.
4559 */
4560 msleep(100);
4561
4562 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4563 }
4564
4565 /**
4566 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4567 * @dev: Device to reset.
4568 * @probe: If set, only check if the device can be reset this way.
4569 *
4570 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4571 * unset, it will be reinitialized internally when going from PCI_D3hot to
4572 * PCI_D0. If that's the case and the device is not in a low-power state
4573 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4574 *
4575 * NOTE: This causes the caller to sleep for twice the device power transition
4576 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4577 * by default (i.e. unless the @dev's d3_delay field has a different value).
4578 * Moreover, only devices in D0 can be reset by this function.
4579 */
4580 static int pci_pm_reset(struct pci_dev *dev, int probe)
4581 {
4582 u16 csr;
4583
4584 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4585 return -ENOTTY;
4586
4587 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4588 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4589 return -ENOTTY;
4590
4591 if (probe)
4592 return 0;
4593
4594 if (dev->current_state != PCI_D0)
4595 return -EINVAL;
4596
4597 csr &= ~PCI_PM_CTRL_STATE_MASK;
4598 csr |= PCI_D3hot;
4599 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4600 pci_dev_d3_sleep(dev);
4601
4602 csr &= ~PCI_PM_CTRL_STATE_MASK;
4603 csr |= PCI_D0;
4604 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4605 pci_dev_d3_sleep(dev);
4606
4607 return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
4608 }
4609 /**
4610 * pcie_wait_for_link - Wait until link is active or inactive
4611 * @pdev: Bridge device
4612 * @active: waiting for active or inactive?
4613 *
4614 * Use this to wait until the link becomes active or inactive.
4615 */
4616 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4617 {
4618 int timeout = 1000;
4619 bool ret;
4620 u16 lnk_status;
4621
4622 /*
4623 * Some controllers might not implement link active reporting. In this
4624 * case, we wait for 1000 + 100 ms.
4625 */
4626 if (!pdev->link_active_reporting) {
4627 msleep(1100);
4628 return true;
4629 }
4630
4631 /*
4632 * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
4633 * after which we should expect the link to become active if the reset was
4634 * successful. If so, software must wait a minimum of 100ms before sending
4635 * configuration requests to devices downstream of this port.
4636 *
4637 * If the link fails to activate, either the device was physically
4638 * removed or the link is permanently failed.
4639 */
4640 if (active)
4641 msleep(20);
4642 for (;;) {
4643 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4644 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4645 if (ret == active)
4646 break;
4647 if (timeout <= 0)
4648 break;
4649 msleep(10);
4650 timeout -= 10;
4651 }
4652 if (active && ret)
4653 msleep(100);
4654 else if (ret != active)
4655 pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
4656 active ? "set" : "cleared");
4657 return ret == active;
4658 }
4659
4660 void pci_reset_secondary_bus(struct pci_dev *dev)
4661 {
4662 u16 ctrl;
4663
4664 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4665 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4666 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4667
4668 /*
4669 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
4670 * this to 2ms to ensure that we meet the minimum requirement.
4671 */
4672 msleep(2);
4673
4674 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4675 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4676
4677 /*
4678 * Trhfa for conventional PCI is 2^25 clock cycles.
4679 * Assuming a minimum 33MHz clock this results in a 1s
4680 * delay before we can consider subordinate devices to
4681 * be re-initialized. PCIe has some ways to shorten this,
4682 * but we don't make use of them yet.
4683 */
4684 ssleep(1);
4685 }
4686
4687 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4688 {
4689 pci_reset_secondary_bus(dev);
4690 }
4691
4692 /**
4693 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4694 * @dev: Bridge device
4695 *
4696 * Use the bridge control register to assert reset on the secondary bus.
4697 * Devices on the secondary bus are left in power-on state.
4698 */
4699 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4700 {
4701 pcibios_reset_secondary_bus(dev);
4702
4703 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4704 }
4705 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4706
4707 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4708 {
4709 struct pci_dev *pdev;
4710
4711 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4712 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4713 return -ENOTTY;
4714
4715 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4716 if (pdev != dev)
4717 return -ENOTTY;
4718
4719 if (probe)
4720 return 0;
4721
4722 return pci_bridge_secondary_bus_reset(dev->bus->self);
4723 }
4724
4725 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4726 {
4727 int rc = -ENOTTY;
4728
4729 if (!hotplug || !try_module_get(hotplug->owner))
4730 return rc;
4731
4732 if (hotplug->ops->reset_slot)
4733 rc = hotplug->ops->reset_slot(hotplug, probe);
4734
4735 module_put(hotplug->owner);
4736
4737 return rc;
4738 }
4739
4740 static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4741 {
4742 struct pci_dev *pdev;
4743
4744 if (dev->subordinate || !dev->slot ||
4745 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4746 return -ENOTTY;
4747
4748 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4749 if (pdev != dev && pdev->slot == dev->slot)
4750 return -ENOTTY;
4751
4752 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4753 }
4754
4755 static void pci_dev_lock(struct pci_dev *dev)
4756 {
4757 pci_cfg_access_lock(dev);
4758 /* block PM suspend, driver probe, etc. */
4759 device_lock(&dev->dev);
4760 }
4761
4762 /* Return 1 on successful lock, 0 on contention */
4763 static int pci_dev_trylock(struct pci_dev *dev)
4764 {
4765 if (pci_cfg_access_trylock(dev)) {
4766 if (device_trylock(&dev->dev))
4767 return 1;
4768 pci_cfg_access_unlock(dev);
4769 }
4770
4771 return 0;
4772 }
4773
4774 static void pci_dev_unlock(struct pci_dev *dev)
4775 {
4776 device_unlock(&dev->dev);
4777 pci_cfg_access_unlock(dev);
4778 }
4779
4780 static void pci_dev_save_and_disable(struct pci_dev *dev)
4781 {
4782 const struct pci_error_handlers *err_handler =
4783 dev->driver ? dev->driver->err_handler : NULL;
4784
4785 /*
4786 * dev->driver->err_handler->reset_prepare() is protected against
4787 * races with ->remove() by the device lock, which must be held by
4788 * the caller.
4789 */
4790 if (err_handler && err_handler->reset_prepare)
4791 err_handler->reset_prepare(dev);
4792
4793 /*
4794 * Wake up the device prior to saving its state. PM registers default
4795 * to D0 after reset and a simple register restore doesn't reliably
4796 * return to a non-D0 state anyway.
4797 */
4798 pci_set_power_state(dev, PCI_D0);
4799
4800 pci_save_state(dev);
4801 /*
4802 * Disable the device by clearing the Command register, except for
4803 * INTx-disable which is set. This not only disables MMIO and I/O port
4804 * BARs, but also prevents the device from being Bus Master, preventing
4805 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
4806 * compliant devices, INTx-disable prevents legacy interrupts.
4807 */
4808 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4809 }
4810
4811 static void pci_dev_restore(struct pci_dev *dev)
4812 {
4813 const struct pci_error_handlers *err_handler =
4814 dev->driver ? dev->driver->err_handler : NULL;
4815
4816 pci_restore_state(dev);
4817
4818 /*
4819 * dev->driver->err_handler->reset_done() is protected against
4820 * races with ->remove() by the device lock, which must be held by
4821 * the caller.
4822 */
4823 if (err_handler && err_handler->reset_done)
4824 err_handler->reset_done(dev);
4825 }
4826
4827 /**
4828 * __pci_reset_function_locked - reset a PCI device function while holding
4829 * the @dev mutex lock.
4830 * @dev: PCI device to reset
4831 *
4832 * Some devices allow an individual function to be reset without affecting
4833 * other functions in the same device. The PCI device must be responsive
4834 * to PCI config space in order to use this function.
4835 *
4836 * The device function is presumed to be unused and the caller is holding
4837 * the device mutex lock when this function is called.
4838 *
4839 * Resetting the device will make the contents of PCI configuration space
4840 * random, so any caller of this must be prepared to reinitialise the
4841 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
4842 * etc.
4843 *
4844 * Returns 0 if the device function was successfully reset or negative if the
4845 * device doesn't support resetting a single function.
4846 */
4847 int __pci_reset_function_locked(struct pci_dev *dev)
4848 {
4849 int rc;
4850
4851 might_sleep();
4852
4853 /*
4854 * A reset method returns -ENOTTY if it doesn't support this device
4855 * and we should try the next method.
4856 *
4857 * If it returns 0 (success), we're finished. If it returns any
4858 * other error, we're also finished: this indicates that further
4859 * reset mechanisms might be broken on the device.
4860 */
4861 rc = pci_dev_specific_reset(dev, 0);
4862 if (rc != -ENOTTY)
4863 return rc;
4864 if (pcie_has_flr(dev)) {
4865 rc = pcie_flr(dev);
4866 if (rc != -ENOTTY)
4867 return rc;
4868 }
4869 rc = pci_af_flr(dev, 0);
4870 if (rc != -ENOTTY)
4871 return rc;
4872 rc = pci_pm_reset(dev, 0);
4873 if (rc != -ENOTTY)
4874 return rc;
4875 rc = pci_dev_reset_slot_function(dev, 0);
4876 if (rc != -ENOTTY)
4877 return rc;
4878 return pci_parent_bus_reset(dev, 0);
4879 }
4880 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
4881
4882 /**
4883 * pci_probe_reset_function - check whether the device can be safely reset
4884 * @dev: PCI device to reset
4885 *
4886 * Some devices allow an individual function to be reset without affecting
4887 * other functions in the same device. The PCI device must be responsive
4888 * to PCI config space in order to use this function.
4889 *
4890 * Returns 0 if the device function can be reset or negative if the
4891 * device doesn't support resetting a single function.
4892 */
4893 int pci_probe_reset_function(struct pci_dev *dev)
4894 {
4895 int rc;
4896
4897 might_sleep();
4898
4899 rc = pci_dev_specific_reset(dev, 1);
4900 if (rc != -ENOTTY)
4901 return rc;
4902 if (pcie_has_flr(dev))
4903 return 0;
4904 rc = pci_af_flr(dev, 1);
4905 if (rc != -ENOTTY)
4906 return rc;
4907 rc = pci_pm_reset(dev, 1);
4908 if (rc != -ENOTTY)
4909 return rc;
4910 rc = pci_dev_reset_slot_function(dev, 1);
4911 if (rc != -ENOTTY)
4912 return rc;
4913
4914 return pci_parent_bus_reset(dev, 1);
4915 }
4916
4917 /**
4918 * pci_reset_function - quiesce and reset a PCI device function
4919 * @dev: PCI device to reset
4920 *
4921 * Some devices allow an individual function to be reset without affecting
4922 * other functions in the same device. The PCI device must be responsive
4923 * to PCI config space in order to use this function.
4924 *
4925 * This function does not just reset the PCI portion of a device, but
4926 * clears all the state associated with the device. This function differs
4927 * from __pci_reset_function_locked() in that it saves and restores device state
4928 * over the reset and takes the PCI device lock.
4929 *
4930 * Returns 0 if the device function was successfully reset or negative if the
4931 * device doesn't support resetting a single function.
4932 */
4933 int pci_reset_function(struct pci_dev *dev)
4934 {
4935 int rc;
4936
4937 if (!dev->reset_fn)
4938 return -ENOTTY;
4939
4940 pci_dev_lock(dev);
4941 pci_dev_save_and_disable(dev);
4942
4943 rc = __pci_reset_function_locked(dev);
4944
4945 pci_dev_restore(dev);
4946 pci_dev_unlock(dev);
4947
4948 return rc;
4949 }
4950 EXPORT_SYMBOL_GPL(pci_reset_function);
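
/*
 * Illustrative sketch (hypothetical recovery path; my_reinit() is a
 * placeholder): a driver that detects a wedged device can reset just
 * its own function and bring it back up, with config state saved and
 * restored by the helper itself:
 *
 *	if (pci_reset_function(pdev) == 0)
 *		my_reinit(pdev);
 */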
4951
4952 /**
4953 * pci_reset_function_locked - quiesce and reset a PCI device function
4954 * @dev: PCI device to reset
4955 *
4956 * Some devices allow an individual function to be reset without affecting
4957 * other functions in the same device. The PCI device must be responsive
4958 * to PCI config space in order to use this function.
4959 *
4960 * This function does not just reset the PCI portion of a device, but
4961 * clears all the state associated with the device. This function differs
4962 * from __pci_reset_function_locked() in that it saves and restores device state
4963 * over the reset. It also differs from pci_reset_function() in that it
4964 * requires the PCI device lock to be held.
4965 *
4966 * Returns 0 if the device function was successfully reset or negative if the
4967 * device doesn't support resetting a single function.
4968 */
4969 int pci_reset_function_locked(struct pci_dev *dev)
4970 {
4971 int rc;
4972
4973 if (!dev->reset_fn)
4974 return -ENOTTY;
4975
4976 pci_dev_save_and_disable(dev);
4977
4978 rc = __pci_reset_function_locked(dev);
4979
4980 pci_dev_restore(dev);
4981
4982 return rc;
4983 }
4984 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
4985
4986 /**
4987 * pci_try_reset_function - quiesce and reset a PCI device function
4988 * @dev: PCI device to reset
4989 *
4990 * Same as above, except return -EAGAIN if unable to lock device.
4991 */
4992 int pci_try_reset_function(struct pci_dev *dev)
4993 {
4994 int rc;
4995
4996 if (!dev->reset_fn)
4997 return -ENOTTY;
4998
4999 if (!pci_dev_trylock(dev))
5000 return -EAGAIN;
5001
5002 pci_dev_save_and_disable(dev);
5003 rc = __pci_reset_function_locked(dev);
5004 pci_dev_restore(dev);
5005 pci_dev_unlock(dev);
5006
5007 return rc;
5008 }
5009 EXPORT_SYMBOL_GPL(pci_try_reset_function);
5010
5011 /* Do any devices on or below this bus prevent a bus reset? */
5012 static bool pci_bus_resetable(struct pci_bus *bus)
5013 {
5014 struct pci_dev *dev;
5015
5017 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5018 return false;
5019
5020 list_for_each_entry(dev, &bus->devices, bus_list) {
5021 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5022 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5023 return false;
5024 }
5025
5026 return true;
5027 }
5028
5029 /* Lock devices from the top of the tree down */
5030 static void pci_bus_lock(struct pci_bus *bus)
5031 {
5032 struct pci_dev *dev;
5033
5034 list_for_each_entry(dev, &bus->devices, bus_list) {
5035 pci_dev_lock(dev);
5036 if (dev->subordinate)
5037 pci_bus_lock(dev->subordinate);
5038 }
5039 }
5040
5041 /* Unlock devices from the bottom of the tree up */
5042 static void pci_bus_unlock(struct pci_bus *bus)
5043 {
5044 struct pci_dev *dev;
5045
5046 list_for_each_entry(dev, &bus->devices, bus_list) {
5047 if (dev->subordinate)
5048 pci_bus_unlock(dev->subordinate);
5049 pci_dev_unlock(dev);
5050 }
5051 }
5052
5053 /* Return 1 on successful lock, 0 on contention */
5054 static int pci_bus_trylock(struct pci_bus *bus)
5055 {
5056 struct pci_dev *dev;
5057
5058 list_for_each_entry(dev, &bus->devices, bus_list) {
5059 if (!pci_dev_trylock(dev))
5060 goto unlock;
5061 if (dev->subordinate) {
5062 if (!pci_bus_trylock(dev->subordinate)) {
5063 pci_dev_unlock(dev);
5064 goto unlock;
5065 }
5066 }
5067 }
5068 return 1;
5069
5070 unlock:
5071 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5072 if (dev->subordinate)
5073 pci_bus_unlock(dev->subordinate);
5074 pci_dev_unlock(dev);
5075 }
5076 return 0;
5077 }
5078
5079 /* Do any devices on or below this slot prevent a bus reset? */
5080 static bool pci_slot_resetable(struct pci_slot *slot)
5081 {
5082 struct pci_dev *dev;
5083
5084 if (slot->bus->self &&
5085 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5086 return false;
5087
5088 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5089 if (!dev->slot || dev->slot != slot)
5090 continue;
5091 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5092 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5093 return false;
5094 }
5095
5096 return true;
5097 }
5098
5099 /* Lock devices from the top of the tree down */
5100 static void pci_slot_lock(struct pci_slot *slot)
5101 {
5102 struct pci_dev *dev;
5103
5104 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5105 if (!dev->slot || dev->slot != slot)
5106 continue;
5107 pci_dev_lock(dev);
5108 if (dev->subordinate)
5109 pci_bus_lock(dev->subordinate);
5110 }
5111 }
5112
5113 /* Unlock devices from the bottom of the tree up */
5114 static void pci_slot_unlock(struct pci_slot *slot)
5115 {
5116 struct pci_dev *dev;
5117
5118 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5119 if (!dev->slot || dev->slot != slot)
5120 continue;
5121 if (dev->subordinate)
5122 pci_bus_unlock(dev->subordinate);
5123 pci_dev_unlock(dev);
5124 }
5125 }
5126
5127 /* Return 1 on successful lock, 0 on contention */
5128 static int pci_slot_trylock(struct pci_slot *slot)
5129 {
5130 struct pci_dev *dev;
5131
5132 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5133 if (!dev->slot || dev->slot != slot)
5134 continue;
5135 if (!pci_dev_trylock(dev))
5136 goto unlock;
5137 if (dev->subordinate) {
5138 if (!pci_bus_trylock(dev->subordinate)) {
5139 pci_dev_unlock(dev);
5140 goto unlock;
5141 }
5142 }
5143 }
5144 return 1;
5145
5146 unlock:
5147 list_for_each_entry_continue_reverse(dev,
5148 &slot->bus->devices, bus_list) {
5149 if (!dev->slot || dev->slot != slot)
5150 continue;
5151 if (dev->subordinate)
5152 pci_bus_unlock(dev->subordinate);
5153 pci_dev_unlock(dev);
5154 }
5155 return 0;
5156 }
5157
5158 /*
5159 * Save and disable devices from the top of the tree down while holding
5160 * the @dev mutex lock for the entire tree.
5161 */
5162 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5163 {
5164 struct pci_dev *dev;
5165
5166 list_for_each_entry(dev, &bus->devices, bus_list) {
5167 pci_dev_save_and_disable(dev);
5168 if (dev->subordinate)
5169 pci_bus_save_and_disable_locked(dev->subordinate);
5170 }
5171 }
5172
5173 /*
5174 * Restore devices from top of the tree down while holding @dev mutex lock
5175 * for the entire tree. Parent bridges need to be restored before we can
5176 * get to subordinate devices.
5177 */
5178 static void pci_bus_restore_locked(struct pci_bus *bus)
5179 {
5180 struct pci_dev *dev;
5181
5182 list_for_each_entry(dev, &bus->devices, bus_list) {
5183 pci_dev_restore(dev);
5184 if (dev->subordinate)
5185 pci_bus_restore_locked(dev->subordinate);
5186 }
5187 }
5188
5189 /*
5190 * Save and disable devices from the top of the tree down while holding
5191 * the @dev mutex lock for the entire tree.
5192 */
5193 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5194 {
5195 struct pci_dev *dev;
5196
5197 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5198 if (!dev->slot || dev->slot != slot)
5199 continue;
5200 pci_dev_save_and_disable(dev);
5201 if (dev->subordinate)
5202 pci_bus_save_and_disable_locked(dev->subordinate);
5203 }
5204 }
5205
5206 /*
5207 * Restore devices from top of the tree down while holding @dev mutex lock
5208 * for the entire tree. Parent bridges need to be restored before we can
5209 * get to subordinate devices.
5210 */
5211 static void pci_slot_restore_locked(struct pci_slot *slot)
5212 {
5213 struct pci_dev *dev;
5214
5215 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5216 if (!dev->slot || dev->slot != slot)
5217 continue;
5218 pci_dev_restore(dev);
5219 if (dev->subordinate)
5220 pci_bus_restore_locked(dev->subordinate);
5221 }
5222 }
5223
5224 static int pci_slot_reset(struct pci_slot *slot, int probe)
5225 {
5226 int rc;
5227
5228 if (!slot || !pci_slot_resetable(slot))
5229 return -ENOTTY;
5230
5231 if (!probe)
5232 pci_slot_lock(slot);
5233
5234 might_sleep();
5235
5236 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5237
5238 if (!probe)
5239 pci_slot_unlock(slot);
5240
5241 return rc;
5242 }
5243
5244 /**
5245 * pci_probe_reset_slot - probe whether a PCI slot can be reset
5246 * @slot: PCI slot to probe
5247 *
5248 * Return 0 if slot can be reset, negative if a slot reset is not supported.
5249 */
5250 int pci_probe_reset_slot(struct pci_slot *slot)
5251 {
5252 return pci_slot_reset(slot, 1);
5253 }
5254 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5255
5256 /**
5257 * __pci_reset_slot - Try to reset a PCI slot
5258 * @slot: PCI slot to reset
5259 *
5260 * A PCI bus may host multiple slots, each slot may support a reset mechanism
5261 * independent of other slots. For instance, some slots may support slot power
5262 * control. In the case of a 1:1 bus to slot architecture, this function may
5263 * wrap the bus reset to avoid spurious slot related events such as hotplug.
5264 * Generally a slot reset should be attempted before a bus reset. All of the
5265 * functions in the slot and any subordinate buses behind the slot are reset
5266 * through this function. PCI config space of all devices in the slot and
5267 * behind the slot is saved before and restored after reset.
5268 *
5269 * Same as above except return -EAGAIN if the slot cannot be locked
5270 */
5271 static int __pci_reset_slot(struct pci_slot *slot)
5272 {
5273 int rc;
5274
5275 rc = pci_slot_reset(slot, 1);
5276 if (rc)
5277 return rc;
5278
5279 if (pci_slot_trylock(slot)) {
5280 pci_slot_save_and_disable_locked(slot);
5281 might_sleep();
5282 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5283 pci_slot_restore_locked(slot);
5284 pci_slot_unlock(slot);
5285 } else
5286 rc = -EAGAIN;
5287
5288 return rc;
5289 }
5290
5291 static int pci_bus_reset(struct pci_bus *bus, int probe)
5292 {
5293 int ret;
5294
5295 if (!bus->self || !pci_bus_resetable(bus))
5296 return -ENOTTY;
5297
5298 if (probe)
5299 return 0;
5300
5301 pci_bus_lock(bus);
5302
5303 might_sleep();
5304
5305 ret = pci_bridge_secondary_bus_reset(bus->self);
5306
5307 pci_bus_unlock(bus);
5308
5309 return ret;
5310 }
5311
5312 /**
5313 * pci_bus_error_reset - reset the bridge's subordinate bus
5314 * @bridge: The parent device that connects to the bus to reset
5315 *
5316 * This function will first try to reset the slots on this bus if the method is
5317 * available. If slot reset fails or is not available, this will fall back to a
5318 * secondary bus reset.
5319 */
5320 int pci_bus_error_reset(struct pci_dev *bridge)
5321 {
5322 struct pci_bus *bus = bridge->subordinate;
5323 struct pci_slot *slot;
5324
5325 if (!bus)
5326 return -ENOTTY;
5327
5328 mutex_lock(&pci_slot_mutex);
5329 if (list_empty(&bus->slots))
5330 goto bus_reset;
5331
5332 list_for_each_entry(slot, &bus->slots, list)
5333 if (pci_probe_reset_slot(slot))
5334 goto bus_reset;
5335
5336 list_for_each_entry(slot, &bus->slots, list)
5337 if (pci_slot_reset(slot, 0))
5338 goto bus_reset;
5339
5340 mutex_unlock(&pci_slot_mutex);
5341 return 0;
5342 bus_reset:
5343 mutex_unlock(&pci_slot_mutex);
5344 return pci_bus_reset(bridge->subordinate, 0);
5345 }
5346
5347 /**
5348 * pci_probe_reset_bus - probe whether a PCI bus can be reset
5349 * @bus: PCI bus to probe
5350 *
5351 * Return 0 if bus can be reset, negative if a bus reset is not supported.
5352 */
5353 int pci_probe_reset_bus(struct pci_bus *bus)
5354 {
5355 return pci_bus_reset(bus, 1);
5356 }
5357 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5358
5359 /**
5360 * __pci_reset_bus - Try to reset a PCI bus
5361 * @bus: top level PCI bus to reset
5362 *
5363 * Same as above except return -EAGAIN if the bus cannot be locked
5364 */
5365 static int __pci_reset_bus(struct pci_bus *bus)
5366 {
5367 int rc;
5368
5369 rc = pci_bus_reset(bus, 1);
5370 if (rc)
5371 return rc;
5372
5373 if (pci_bus_trylock(bus)) {
5374 pci_bus_save_and_disable_locked(bus);
5375 might_sleep();
5376 rc = pci_bridge_secondary_bus_reset(bus->self);
5377 pci_bus_restore_locked(bus);
5378 pci_bus_unlock(bus);
5379 } else
5380 rc = -EAGAIN;
5381
5382 return rc;
5383 }
5384
5385 /**
5386 * pci_reset_bus - Try to reset a PCI bus
5387 * @pdev: top level PCI device to reset via slot/bus
5388 *
5389 * Same as above except return -EAGAIN if the bus cannot be locked
5390 */
5391 int pci_reset_bus(struct pci_dev *pdev)
5392 {
5393 return (!pci_probe_reset_slot(pdev->slot)) ?
5394 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5395 }
5396 EXPORT_SYMBOL_GPL(pci_reset_bus);
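/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * whose device lacks function-level reset might fall back to a slot or
 * bus reset. This assumes "pdev" is a struct pci_dev the caller owns
 * and that nothing else is bound below the slot/bus:
 *
 *	int err = pci_reset_bus(pdev);
 *
 *	if (err == -EAGAIN)
 *		dev_warn(&pdev->dev, "slot/bus busy, reset not attempted\n");
 *	else if (err)
 *		dev_err(&pdev->dev, "slot/bus reset failed: %d\n", err);
 */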
5397
5398 /**
5399 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5400 * @dev: PCI device to query
5401 *
5402 * Returns mmrbc: maximum designed memory read count in bytes or
5403 * appropriate error value.
5404 */
5405 int pcix_get_max_mmrbc(struct pci_dev *dev)
5406 {
5407 int cap;
5408 u32 stat;
5409
5410 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5411 if (!cap)
5412 return -EINVAL;
5413
5414 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5415 return -EINVAL;
5416
5417 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5418 }
5419 EXPORT_SYMBOL(pcix_get_max_mmrbc);
5420
5421 /**
5422 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5423 * @dev: PCI device to query
5424 *
5425 * Returns mmrbc: maximum memory read count in bytes or appropriate error
5426 * value.
5427 */
5428 int pcix_get_mmrbc(struct pci_dev *dev)
5429 {
5430 int cap;
5431 u16 cmd;
5432
5433 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5434 if (!cap)
5435 return -EINVAL;
5436
5437 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5438 return -EINVAL;
5439
5440 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5441 }
5442 EXPORT_SYMBOL(pcix_get_mmrbc);
5443
5444 /**
5445 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5446 * @dev: PCI device to set
5447 * @mmrbc: maximum memory read count in bytes
5448 * valid values are 512, 1024, 2048, 4096
5449 *
5450 * If possible, sets the maximum memory read byte count; some bridges have
5451 * errata that prevent this.
5452 */
5453 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5454 {
5455 int cap;
5456 u32 stat, v, o;
5457 u16 cmd;
5458
5459 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5460 return -EINVAL;
5461
5462 v = ffs(mmrbc) - 10;
5463
5464 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5465 if (!cap)
5466 return -EINVAL;
5467
5468 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5469 return -EINVAL;
5470
5471 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5472 return -E2BIG;
5473
5474 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5475 return -EINVAL;
5476
5477 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5478 if (o != v) {
5479 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5480 return -EIO;
5481
5482 cmd &= ~PCI_X_CMD_MAX_READ;
5483 cmd |= v << 2;
5484 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5485 return -EIO;
5486 }
5487 return 0;
5488 }
5489 EXPORT_SYMBOL(pcix_set_mmrbc);
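/*
 * Worked example of the MMRBC encoding used above (illustrative only):
 * ffs(512) == 10, so v = ffs(mmrbc) - 10 maps the valid byte counts to
 * the 2-bit register code, and the getters invert it with 512 << code:
 *
 *	512  -> v = 0 -> 512 << 0 = 512
 *	1024 -> v = 1 -> 512 << 1 = 1024
 *	2048 -> v = 2 -> 512 << 2 = 2048
 *	4096 -> v = 3 -> 512 << 3 = 4096
 */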
5490
5491 /**
5492 * pcie_get_readrq - get PCI Express read request size
5493 * @dev: PCI device to query
5494 *
5495 * Returns maximum memory read request in bytes or appropriate error value.
5496 */
5497 int pcie_get_readrq(struct pci_dev *dev)
5498 {
5499 u16 ctl;
5500
5501 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5502
5503 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5504 }
5505 EXPORT_SYMBOL(pcie_get_readrq);
5506
5507 /**
5508 * pcie_set_readrq - set PCI Express maximum memory read request
5509 * @dev: PCI device to set
5510 * @rq: maximum memory read count in bytes
5511 * valid values are 128, 256, 512, 1024, 2048, 4096
5512 *
5513 * If possible, sets the maximum memory read request size in bytes.
5514 */
5515 int pcie_set_readrq(struct pci_dev *dev, int rq)
5516 {
5517 u16 v;
5518
5519 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5520 return -EINVAL;
5521
5522 /*
5523 * If using the "performance" PCIe config, we clamp the read rq
5524 * size to the max packet size to keep the host bridge from
5525 * generating requests larger than we can cope with.
5526 */
5527 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5528 int mps = pcie_get_mps(dev);
5529
5530 if (mps < rq)
5531 rq = mps;
5532 }
5533
5534 v = (ffs(rq) - 8) << 12;
5535
5536 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5537 PCI_EXP_DEVCTL_READRQ, v);
5538 }
5539 EXPORT_SYMBOL(pcie_set_readrq);
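/*
 * Usage sketch (hypothetical driver code): clamp the device's Max Read
 * Request Size to 256 bytes, e.g. to work around a platform limitation.
 * The encoding mirrors pcie_get_readrq(): 128 << code, so 256 encodes
 * as code 1 (ffs(256) - 8 == 1):
 *
 *	if (pcie_set_readrq(pdev, 256))
 *		dev_warn(&pdev->dev, "failed to set 256B MRRS\n");
 */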
5540
5541 /**
5542 * pcie_get_mps - get PCI Express maximum payload size
5543 * @dev: PCI device to query
5544 *
5545 * Returns maximum payload size in bytes
5546 */
5547 int pcie_get_mps(struct pci_dev *dev)
5548 {
5549 u16 ctl;
5550
5551 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5552
5553 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5554 }
5555 EXPORT_SYMBOL(pcie_get_mps);
5556
5557 /**
5558 * pcie_set_mps - set PCI Express maximum payload size
5559 * @dev: PCI device to set
5560 * @mps: maximum payload size in bytes
5561 * valid values are 128, 256, 512, 1024, 2048, 4096
5562 *
5563 * If possible, sets the maximum payload size.
5564 */
5565 int pcie_set_mps(struct pci_dev *dev, int mps)
5566 {
5567 u16 v;
5568
5569 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5570 return -EINVAL;
5571
5572 v = ffs(mps) - 8;
5573 if (v > dev->pcie_mpss)
5574 return -EINVAL;
5575 v <<= 5;
5576
5577 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5578 PCI_EXP_DEVCTL_PAYLOAD, v);
5579 }
5580 EXPORT_SYMBOL(pcie_set_mps);
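/*
 * Worked example of the MPS encoding used above (illustrative only):
 * v = ffs(mps) - 8 maps 128/256/512/... bytes to codes 0/1/2/..., and
 * the code is rejected when it exceeds dev->pcie_mpss, the ceiling the
 * device advertised in its capability. A device with pcie_mpss == 1
 * therefore accepts 128 or 256 bytes and gets -EINVAL for 512 and above.
 */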
5581
5582 /**
5583 * pcie_bandwidth_available - determine minimum link settings of a PCIe
5584 * device and its bandwidth limitation
5585 * @dev: PCI device to query
5586 * @limiting_dev: storage for device causing the bandwidth limitation
5587 * @speed: storage for speed of limiting device
5588 * @width: storage for width of limiting device
5589 *
5590 * Walk up the PCI device chain and find the point where the minimum
5591 * bandwidth is available. Return the bandwidth available there and (if
5592 * limiting_dev, speed, and width pointers are supplied) information about
5593 * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of
5594 * raw bandwidth.
5595 */
5596 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5597 enum pci_bus_speed *speed,
5598 enum pcie_link_width *width)
5599 {
5600 u16 lnksta;
5601 enum pci_bus_speed next_speed;
5602 enum pcie_link_width next_width;
5603 u32 bw, next_bw;
5604
5605 if (speed)
5606 *speed = PCI_SPEED_UNKNOWN;
5607 if (width)
5608 *width = PCIE_LNK_WIDTH_UNKNOWN;
5609
5610 bw = 0;
5611
5612 while (dev) {
5613 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5614
5615 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5616 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5617 PCI_EXP_LNKSTA_NLW_SHIFT;
5618
5619 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5620
5621 /* Check if current device limits the total bandwidth */
5622 if (!bw || next_bw <= bw) {
5623 bw = next_bw;
5624
5625 if (limiting_dev)
5626 *limiting_dev = dev;
5627 if (speed)
5628 *speed = next_speed;
5629 if (width)
5630 *width = next_width;
5631 }
5632
5633 dev = pci_upstream_bridge(dev);
5634 }
5635
5636 return bw;
5637 }
5638 EXPORT_SYMBOL(pcie_bandwidth_available);
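/*
 * Usage sketch (hypothetical caller): find the weakest link in the
 * path above a device. Note that "speed" and "width" describe the
 * limiting link, which is not necessarily the device's own link:
 *
 *	enum pcie_link_width width;
 *	enum pci_bus_speed speed;
 *	struct pci_dev *lim = NULL;
 *	u32 bw = pcie_bandwidth_available(pdev, &lim, &speed, &width);
 *
 *	dev_info(&pdev->dev, "%u Mb/s available via %s\n",
 *		 bw, lim ? pci_name(lim) : "<unknown>");
 */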
5639
5640 /**
5641 * pcie_get_speed_cap - query for the PCI device's link speed capability
5642 * @dev: PCI device to query
5643 *
5644 * Query the PCI device speed capability. Return the maximum link speed
5645 * supported by the device.
5646 */
5647 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5648 {
5649 u32 lnkcap2, lnkcap;
5650
5651 /*
5652 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
5653 * implementation note there recommends using the Supported Link
5654 * Speeds Vector in Link Capabilities 2 when supported.
5655 *
5656 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
5657 * should use the Supported Link Speeds field in Link Capabilities,
5658 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
5659 */
5660 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5661 if (lnkcap2) { /* PCIe r3.0-compliant */
5662 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB)
5663 return PCIE_SPEED_32_0GT;
5664 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
5665 return PCIE_SPEED_16_0GT;
5666 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
5667 return PCIE_SPEED_8_0GT;
5668 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
5669 return PCIE_SPEED_5_0GT;
5670 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
5671 return PCIE_SPEED_2_5GT;
5672 return PCI_SPEED_UNKNOWN;
5673 }
5674
5675 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5676 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5677 return PCIE_SPEED_5_0GT;
5678 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5679 return PCIE_SPEED_2_5GT;
5680
5681 return PCI_SPEED_UNKNOWN;
5682 }
5683 EXPORT_SYMBOL(pcie_get_speed_cap);
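/*
 * Example (illustrative): a Gen3 endpoint sets the low three bits of
 * the Supported Link Speeds Vector (2.5, 5.0, and 8.0 GT/s), so the
 * checks above fall through the 32.0 and 16.0 GT/s tests and return
 * PCIE_SPEED_8_0GT.
 */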
5684
5685 /**
5686 * pcie_get_width_cap - query for the PCI device's link width capability
5687 * @dev: PCI device to query
5688 *
5689 * Query the PCI device width capability. Return the maximum link width
5690 * supported by the device.
5691 */
5692 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5693 {
5694 u32 lnkcap;
5695
5696 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5697 if (lnkcap)
5698 return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5699
5700 return PCIE_LNK_WIDTH_UNKNOWN;
5701 }
5702 EXPORT_SYMBOL(pcie_get_width_cap);
5703
5704 /**
5705 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
5706 * @dev: PCI device
5707 * @speed: storage for link speed
5708 * @width: storage for link width
5709 *
5710 * Calculate a PCI device's link bandwidth by querying for its link speed
5711 * and width, multiplying them, and applying encoding overhead. The result
5712 * is in Mb/s, i.e., megabits/second of raw bandwidth.
5713 */
5714 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5715 enum pcie_link_width *width)
5716 {
5717 *speed = pcie_get_speed_cap(dev);
5718 *width = pcie_get_width_cap(dev);
5719
5720 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5721 return 0;
5722
5723 return *width * PCIE_SPEED2MBS_ENC(*speed);
5724 }
5725
5726 /**
5727 * __pcie_print_link_status - Report the PCI device's link speed and width
5728 * @dev: PCI device to query
5729 * @verbose: Print info even when enough bandwidth is available
5730 *
5731 * If the available bandwidth at the device is less than the device is
5732 * capable of, report the device's maximum possible bandwidth and the
5733 * upstream link that limits its performance. If @verbose, always print
5734 * the available bandwidth, even if the device isn't constrained.
5735 */
5736 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
5737 {
5738 enum pcie_link_width width, width_cap;
5739 enum pci_bus_speed speed, speed_cap;
5740 struct pci_dev *limiting_dev = NULL;
5741 u32 bw_avail, bw_cap;
5742
5743 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
5744 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5745
5746 if (bw_avail >= bw_cap && verbose)
5747 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5748 bw_cap / 1000, bw_cap % 1000,
5749 PCIE_SPEED2STR(speed_cap), width_cap);
5750 else if (bw_avail < bw_cap)
5751 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5752 bw_avail / 1000, bw_avail % 1000,
5753 PCIE_SPEED2STR(speed), width,
5754 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
5755 bw_cap / 1000, bw_cap % 1000,
5756 PCIE_SPEED2STR(speed_cap), width_cap);
5757 }
5758
5759 /**
5760 * pcie_print_link_status - Report the PCI device's link speed and width
5761 * @dev: PCI device to query
5762 *
5763 * Report the available bandwidth at the device.
5764 */
5765 void pcie_print_link_status(struct pci_dev *dev)
5766 {
5767 __pcie_print_link_status(dev, true);
5768 }
5769 EXPORT_SYMBOL(pcie_print_link_status);
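/*
 * Usage sketch (hypothetical driver probe): high-bandwidth devices
 * such as NICs commonly call this once at probe time so users can
 * spot a card plugged into a slower slot than it is capable of:
 *
 *	pcie_print_link_status(pdev);
 */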
5770
5771 /**
5772 * pci_select_bars - Make BAR mask from the type of resource
5773 * @dev: the PCI device for which the BAR mask is made
5774 * @flags: resource type mask to be selected
5775 *
5776 * This helper routine makes a BAR mask from the type of resource.
5777 */
5778 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
5779 {
5780 int i, bars = 0;
5781 for (i = 0; i < PCI_NUM_RESOURCES; i++)
5782 if (pci_resource_flags(dev, i) & flags)
5783 bars |= (1 << i);
5784 return bars;
5785 }
5786 EXPORT_SYMBOL(pci_select_bars);
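/*
 * Usage sketch (hypothetical caller): claim only the memory BARs of a
 * device, leaving any I/O port BARs untouched ("mydrv" is a made-up
 * driver name):
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	if (pci_request_selected_regions(pdev, bars, "mydrv"))
 *		return -EBUSY;
 */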
5787
5788 /* Some architectures require additional programming to enable VGA */
5789 static arch_set_vga_state_t arch_set_vga_state;
5790
5791 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
5792 {
5793 arch_set_vga_state = func; /* NULL disables */
5794 }
5795
5796 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
5797 unsigned int command_bits, u32 flags)
5798 {
5799 if (arch_set_vga_state)
5800 return arch_set_vga_state(dev, decode, command_bits,
5801 flags);
5802 return 0;
5803 }
5804
5805 /**
5806 * pci_set_vga_state - set VGA decode state on device and parents if requested
5807 * @dev: the PCI device
5808 * @decode: true = enable decoding, false = disable decoding
5809 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
5810 * @flags: traverse ancestors and change bridges
5811 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
5812 */
5813 int pci_set_vga_state(struct pci_dev *dev, bool decode,
5814 unsigned int command_bits, u32 flags)
5815 {
5816 struct pci_bus *bus;
5817 struct pci_dev *bridge;
5818 u16 cmd;
5819 int rc;
5820
5821 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
5822
5823 /* ARCH specific VGA enables */
5824 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
5825 if (rc)
5826 return rc;
5827
5828 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
5829 pci_read_config_word(dev, PCI_COMMAND, &cmd);
5830 if (decode)
5831 cmd |= command_bits;
5832 else
5833 cmd &= ~command_bits;
5834 pci_write_config_word(dev, PCI_COMMAND, cmd);
5835 }
5836
5837 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
5838 return 0;
5839
5840 bus = dev->bus;
5841 while (bus) {
5842 bridge = bus->self;
5843 if (bridge) {
5844 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
5845 &cmd);
5846 if (decode)
5847 cmd |= PCI_BRIDGE_CTL_VGA;
5848 else
5849 cmd &= ~PCI_BRIDGE_CTL_VGA;
5850 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
5851 cmd);
5852 }
5853 bus = bus->parent;
5854 }
5855 return 0;
5856 }
5857
5858 #ifdef CONFIG_ACPI
5859 bool pci_pr3_present(struct pci_dev *pdev)
5860 {
5861 struct acpi_device *adev;
5862
5863 if (acpi_disabled)
5864 return false;
5865
5866 adev = ACPI_COMPANION(&pdev->dev);
5867 if (!adev)
5868 return false;
5869
5870 return adev->power.flags.power_resources &&
5871 acpi_has_method(adev->handle, "_PR3");
5872 }
5873 EXPORT_SYMBOL_GPL(pci_pr3_present);
5874 #endif
5875
5876 /**
5877 * pci_add_dma_alias - Add a DMA devfn alias for a device
5878 * @dev: the PCI device for which alias is added
5879 * @devfn: alias slot and function
5880 *
5881 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
5882 * which is used to program permissible bus-devfn source addresses for DMA
5883 * requests in an IOMMU. These aliases factor into IOMMU group creation
5884 * and are useful for devices generating DMA requests beyond or different
5885 * from their logical bus-devfn. Examples include device quirks where the
5886 * device simply uses the wrong devfn, as well as non-transparent bridges
5887 * where the alias may be a proxy for devices in another domain.
5888 *
5889 * IOMMU group creation is performed during device discovery or addition,
5890 * prior to any potential DMA mapping and therefore prior to driver probing
5891 * (especially for userspace assigned devices where IOMMU group definition
5892 * cannot be left as a userspace activity). DMA aliases should therefore
5893 * be configured via quirks, such as the PCI fixup header quirk.
5894 */
5895 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
5896 {
5897 if (!dev->dma_alias_mask)
5898 dev->dma_alias_mask = bitmap_zalloc(U8_MAX + 1, GFP_KERNEL);
5899 if (!dev->dma_alias_mask) {
5900 pci_warn(dev, "Unable to allocate DMA alias mask\n");
5901 return;
5902 }
5903
5904 set_bit(devfn, dev->dma_alias_mask);
5905 pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
5906 PCI_SLOT(devfn), PCI_FUNC(devfn));
5907 }
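/*
 * Usage sketch (hypothetical quirk, not part of this file): a device
 * that issues DMA with the requester ID of function 1 while its driver
 * binds to function 0 could be fixed up at enumeration time like this
 * (the vendor/device IDs are placeholders):
 *
 *	static void quirk_dma_func1_alias(struct pci_dev *dev)
 *	{
 *		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(SOME_VENDOR_ID, SOME_DEVICE_ID,
 *				 quirk_dma_func1_alias);
 */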
5908
5909 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
5910 {
5911 return (dev1->dma_alias_mask &&
5912 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
5913 (dev2->dma_alias_mask &&
5914 test_bit(dev1->devfn, dev2->dma_alias_mask));
5915 }
5916
5917 bool pci_device_is_present(struct pci_dev *pdev)
5918 {
5919 u32 v;
5920
5921 if (pci_dev_is_disconnected(pdev))
5922 return false;
5923 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
5924 }
5925 EXPORT_SYMBOL_GPL(pci_device_is_present);
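/*
 * Usage sketch (hypothetical caller): hotplug-aware drivers can check
 * presence before issuing config or MMIO accesses that would time out
 * against a surprise-removed device:
 *
 *	if (!pci_device_is_present(pdev))
 *		return -ENODEV;
 */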
5926
5927 void pci_ignore_hotplug(struct pci_dev *dev)
5928 {
5929 struct pci_dev *bridge = dev->bus->self;
5930
5931 dev->ignore_hotplug = 1;
5932 /* Propagate the "ignore hotplug" setting to the parent bridge. */
5933 if (bridge)
5934 bridge->ignore_hotplug = 1;
5935 }
5936 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
5937
5938 resource_size_t __weak pcibios_default_alignment(void)
5939 {
5940 return 0;
5941 }
5942
5943 /*
5944 * Arches that don't want to expose struct resource to userland as-is in
5945 * sysfs and /proc can implement their own pci_resource_to_user().
5946 */
5947 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
5948 const struct resource *rsrc,
5949 resource_size_t *start, resource_size_t *end)
5950 {
5951 *start = rsrc->start;
5952 *end = rsrc->end;
5953 }
5954
5955 static char *resource_alignment_param;
5956 static DEFINE_SPINLOCK(resource_alignment_lock);
5957
5958 /**
5959 * pci_specified_resource_alignment - get resource alignment specified by user.
5960 * @dev: the PCI device to get
5961 * @resize: whether or not to change resources' size when reassigning alignment
5962 *
5963 * RETURNS: Resource alignment if it is specified.
5964 * Zero if it is not specified.
5965 */
5966 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
5967 bool *resize)
5968 {
5969 int align_order, count;
5970 resource_size_t align = pcibios_default_alignment();
5971 const char *p;
5972 int ret;
5973
5974 spin_lock(&resource_alignment_lock);
5975 p = resource_alignment_param;
5976 if (!p || !*p)
5977 goto out;
5978 if (pci_has_flag(PCI_PROBE_ONLY)) {
5979 align = 0;
5980 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
5981 goto out;
5982 }
5983
5984 while (*p) {
5985 count = 0;
5986 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
5987 p[count] == '@') {
5988 p += count + 1;
5989 } else {
5990 align_order = -1;
5991 }
5992
5993 ret = pci_dev_str_match(dev, p, &p);
5994 if (ret == 1) {
5995 *resize = true;
5996 if (align_order == -1)
5997 align = PAGE_SIZE;
5998 else
5999 align = 1ULL << align_order;
6000 break;
6001 } else if (ret < 0) {
6002 pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6003 p);
6004 break;
6005 }
6006
6007 if (*p != ';' && *p != ',') {
6008 /* End of param or invalid format */
6009 break;
6010 }
6011 p++;
6012 }
6013 out:
6014 spin_unlock(&resource_alignment_lock);
6015 return align;
6016 }
6017
6018 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6019 resource_size_t align, bool resize)
6020 {
6021 struct resource *r = &dev->resource[bar];
6022 resource_size_t size;
6023
6024 if (!(r->flags & IORESOURCE_MEM))
6025 return;
6026
6027 if (r->flags & IORESOURCE_PCI_FIXED) {
6028 pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
6029 bar, r, (unsigned long long)align);
6030 return;
6031 }
6032
6033 size = resource_size(r);
6034 if (size >= align)
6035 return;
6036
6037 /*
6038 * Increase the alignment of the resource. There are two ways we
6039 * can do this:
6040 *
6041 * 1) Increase the size of the resource. BARs are aligned on their
6042 * size, so when we reallocate space for this resource, we'll
6043 * allocate it with the larger alignment. This also prevents
6044 * assignment of any other BARs inside the alignment region, so
6045 * if we're requesting page alignment, this means no other BARs
6046 * will share the page.
6047 *
6048 * The disadvantage is that this makes the resource larger than
6049 * the hardware BAR, which may break drivers that compute things
6050 * based on the resource size, e.g., to find registers at a
6051 * fixed offset before the end of the BAR.
6052 *
6053 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6054 * set r->start to the desired alignment. By itself this
6055 * doesn't prevent other BARs being put inside the alignment
6056 * region, but if we realign *every* resource of every device in
6057 * the system, none of them will share an alignment region.
6058 *
6059 * When the user has requested alignment for only some devices via
6060 * the "pci=resource_alignment" argument, "resize" is true and we
6061 * use the first method. Otherwise we assume we're aligning all
6062 * devices and we use the second.
6063 */
6064
6065 pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
6066 bar, r, (unsigned long long)align);
6067
6068 if (resize) {
6069 r->start = 0;
6070 r->end = align - 1;
6071 } else {
6072 r->flags &= ~IORESOURCE_SIZEALIGN;
6073 r->flags |= IORESOURCE_STARTALIGN;
6074 r->start = align;
6075 r->end = r->start + size - 1;
6076 }
6077 r->flags |= IORESOURCE_UNSET;
6078 }
6079
6080 /*
6081 * This function disables memory decoding and releases memory resources
6082 * of the device specified by the kernel's boot parameter
6083 * 'pci=resource_alignment='. It also rounds up the size to the specified
6084 * alignment. Later on, the kernel will assign the page-aligned memory
6085 * resource back to the device.
6086 */
6087 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6088 {
6089 int i;
6090 struct resource *r;
6091 resource_size_t align;
6092 u16 command;
6093 bool resize = false;
6094
6095 /*
6096 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6097 * 3.4.1.11. Their resources are allocated from the space
6098 * described by the VF BARx register in the PF's SR-IOV capability.
6099 * We can't influence their alignment here.
6100 */
6101 if (dev->is_virtfn)
6102 return;
6103
6104 /* check if specified PCI is target device to reassign */
6105 align = pci_specified_resource_alignment(dev, &resize);
6106 if (!align)
6107 return;
6108
6109 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6110 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6111 pci_warn(dev, "Can't reassign resources to host bridge\n");
6112 return;
6113 }
6114
6115 pci_read_config_word(dev, PCI_COMMAND, &command);
6116 command &= ~PCI_COMMAND_MEMORY;
6117 pci_write_config_word(dev, PCI_COMMAND, command);
6118
6119 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6120 pci_request_resource_alignment(dev, i, align, resize);
6121
6122 /*
6123 * Need to disable bridge's resource window,
6124 * to enable the kernel to reassign new resource
6125 * window later on.
6126 */
6127 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6128 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6129 r = &dev->resource[i];
6130 if (!(r->flags & IORESOURCE_MEM))
6131 continue;
6132 r->flags |= IORESOURCE_UNSET;
6133 r->end = resource_size(r) - 1;
6134 r->start = 0;
6135 }
6136 pci_disable_bridge_window(dev);
6137 }
6138 }
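/*
 * Example (illustrative; the device address is made up): booting with
 *
 *	pci=resource_alignment=12@0000:01:00.0
 *
 * requests 2^12 = 4096-byte alignment for the memory BARs of device
 * 0000:01:00.0. Omitting the "12@" order prefix defaults to PAGE_SIZE,
 * per pci_specified_resource_alignment() above.
 */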
6139
6140 static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
6141 {
6142 size_t count = 0;
6143
6144 spin_lock(&resource_alignment_lock);
6145 if (resource_alignment_param)
6146 count = snprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
6147 spin_unlock(&resource_alignment_lock);
6148
6149 /*
6150 * When set by the command line, resource_alignment_param will not
6151 * have a trailing line feed, which is ugly. So conditionally add
6152 * it here.
6153 */
6154 if (count && count < PAGE_SIZE - 1 && buf[count - 1] != '\n') {
6155 buf[count++] = '\n';
6156 buf[count] = 0;
6157 }
6158
6159 return count;
6160 }
6161
6162 static ssize_t resource_alignment_store(struct bus_type *bus,
6163 const char *buf, size_t count)
6164 {
6165 char *param = kstrndup(buf, count, GFP_KERNEL);
6166
6167 if (!param)
6168 return -ENOMEM;
6169
6170 spin_lock(&resource_alignment_lock);
6171 kfree(resource_alignment_param);
6172 resource_alignment_param = param;
6173 spin_unlock(&resource_alignment_lock);
6174 return count;
6175 }
6176
6177 static BUS_ATTR_RW(resource_alignment);
6178
6179 static int __init pci_resource_alignment_sysfs_init(void)
6180 {
6181 return bus_create_file(&pci_bus_type,
6182 &bus_attr_resource_alignment);
6183 }
6184 late_initcall(pci_resource_alignment_sysfs_init);
6185
6186 static void pci_no_domains(void)
6187 {
6188 #ifdef CONFIG_PCI_DOMAINS
6189 pci_domains_supported = 0;
6190 #endif
6191 }
6192
6193 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6194 static atomic_t __domain_nr = ATOMIC_INIT(-1);
6195
6196 static int pci_get_new_domain_nr(void)
6197 {
6198 return atomic_inc_return(&__domain_nr);
6199 }
6200
6201 static int of_pci_bus_find_domain_nr(struct device *parent)
6202 {
6203 static int use_dt_domains = -1;
6204 int domain = -1;
6205
6206 if (parent)
6207 domain = of_get_pci_domain_nr(parent->of_node);
6208
6209 /*
6210 * Check DT domain and use_dt_domains values.
6211 *
6212 * If the DT domain property is valid (domain >= 0) and
6213 * use_dt_domains != 0, the DT assignment is valid since this means
6214 * we have not previously allocated a domain number by using
6215 * pci_get_new_domain_nr(); we should also update use_dt_domains to
6216 * 1, to indicate that we have just assigned a domain number from
6217 * DT.
6218 *
6219 * If the DT domain property value is not valid (i.e., domain < 0), and we
6220 * have not previously assigned a domain number from DT
6221 * (use_dt_domains != 1), we should assign a domain number by
6222 * using the:
6223 *
6224 * pci_get_new_domain_nr()
6225 *
6226 * API and update the use_dt_domains value to keep track of the method we
6227 * are using to assign domain numbers (use_dt_domains = 0).
6228 *
6229 * All other combinations imply we have a platform that is trying
6230 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
6231 * which is a recipe for domain mishandling and it is prevented by
6232 * invalidating the domain value (domain = -1) and printing a
6233 * corresponding error.
6234 */
6235 if (domain >= 0 && use_dt_domains) {
6236 use_dt_domains = 1;
6237 } else if (domain < 0 && use_dt_domains != 1) {
6238 use_dt_domains = 0;
6239 domain = pci_get_new_domain_nr();
6240 } else {
6241 if (parent)
6242 pr_err("Node %pOF has ", parent->of_node);
6243 pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
6244 domain = -1;
6245 }
6246
6247 return domain;
6248 }
6249
6250 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6251 {
6252 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6253 acpi_pci_bus_find_domain_nr(bus);
6254 }
6255 #endif
6256
6257 /**
6258 * pci_ext_cfg_avail - can we access extended PCI config space?
6259 *
6260 * Returns 1 if we can access PCI extended config space (offsets
6261 * greater than 0xff). This is the default implementation. Architecture
6262 * implementations can override this.
6263 */
6264 int __weak pci_ext_cfg_avail(void)
6265 {
6266 return 1;
6267 }
6268
6269 void __weak pci_fixup_cardbus(struct pci_bus *bus)
6270 {
6271 }
6272 EXPORT_SYMBOL(pci_fixup_cardbus);
6273
6274 static int __init pci_setup(char *str)
6275 {
6276 while (str) {
6277 char *k = strchr(str, ',');
6278 if (k)
6279 *k++ = 0;
6280 if (*str && (str = pcibios_setup(str)) && *str) {
6281 if (!strcmp(str, "nomsi")) {
6282 pci_no_msi();
6283 } else if (!strncmp(str, "noats", 5)) {
6284 pr_info("PCIe: ATS is disabled\n");
6285 pcie_ats_disabled = true;
6286 } else if (!strcmp(str, "noaer")) {
6287 pci_no_aer();
6288 } else if (!strcmp(str, "earlydump")) {
6289 pci_early_dump = true;
6290 } else if (!strncmp(str, "realloc=", 8)) {
6291 pci_realloc_get_opt(str + 8);
6292 } else if (!strncmp(str, "realloc", 7)) {
6293 pci_realloc_get_opt("on");
6294 } else if (!strcmp(str, "nodomains")) {
6295 pci_no_domains();
6296 } else if (!strncmp(str, "noari", 5)) {
6297 pcie_ari_disabled = true;
6298 } else if (!strncmp(str, "cbiosize=", 9)) {
6299 pci_cardbus_io_size = memparse(str + 9, &str);
6300 } else if (!strncmp(str, "cbmemsize=", 10)) {
6301 pci_cardbus_mem_size = memparse(str + 10, &str);
6302 } else if (!strncmp(str, "resource_alignment=", 19)) {
6303 resource_alignment_param = str + 19;
6304 } else if (!strncmp(str, "ecrc=", 5)) {
6305 pcie_ecrc_get_policy(str + 5);
6306 } else if (!strncmp(str, "hpiosize=", 9)) {
6307 pci_hotplug_io_size = memparse(str + 9, &str);
6308 } else if (!strncmp(str, "hpmemsize=", 10)) {
6309 pci_hotplug_mem_size = memparse(str + 10, &str);
6310 } else if (!strncmp(str, "hpbussize=", 10)) {
6311 pci_hotplug_bus_size =
6312 simple_strtoul(str + 10, &str, 0);
6313 if (pci_hotplug_bus_size > 0xff)
6314 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6315 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6316 pcie_bus_config = PCIE_BUS_TUNE_OFF;
6317 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
6318 pcie_bus_config = PCIE_BUS_SAFE;
6319 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
6320 pcie_bus_config = PCIE_BUS_PERFORMANCE;
6321 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6322 pcie_bus_config = PCIE_BUS_PEER2PEER;
6323 } else if (!strncmp(str, "pcie_scan_all", 13)) {
6324 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6325 } else if (!strncmp(str, "disable_acs_redir=", 18)) {
6326 disable_acs_redir_param = str + 18;
6327 } else {
6328 pr_err("PCI: Unknown option `%s'\n", str);
6329 }
6330 }
6331 str = k;
6332 }
6333 return 0;
6334 }
6335 early_param("pci", pci_setup);
6336
6337 /*
6338 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6339 * in pci_setup(), above, to point to data in the __initdata section which
6340 * will be freed after the init sequence is complete. We can't allocate memory
6341 * in pci_setup() because some architectures do not have any memory allocation
6342 * service available during an early_param() call. So we allocate memory and
6343 * copy the variable here before the init section is freed.
6344 *
6345 */
6346 static int __init pci_realloc_setup_params(void)
6347 {
6348 resource_alignment_param = kstrdup(resource_alignment_param,
6349 GFP_KERNEL);
6350 disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6351
6352 return 0;
6353 }
6354 pure_initcall(pci_realloc_setup_params);
6355