/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;

/*
 * The default CLS is used if the arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value.  The arch can override either
 * the default or the actual value as it sees fit.  Note that CLS is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;
static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		dev_warn(&pdev->dev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
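
/*
 * Usage sketch (illustrative only): a typical probe path maps BAR 0 with
 * pci_ioremap_bar() after enabling the device.  The "example_" name is
 * hypothetical, not part of the PCI core API.
 */
static inline void __iomem *example_map_bar0(struct pci_dev *pdev)
{
	if (pci_enable_device_mem(pdev))
		return NULL;

	return pci_ioremap_bar(pdev, 0);	/* returns NULL on failure */
}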

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif


static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM		Power Management
 *  %PCI_CAP_ID_AGP		Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD		Vital Product Data
 *  %PCI_CAP_ID_SLOTID	Slot Identification
 *  %PCI_CAP_ID_MSI		Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX	PCI-X
 *  %PCI_CAP_ID_EXP		PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
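
/*
 * Usage sketch (illustrative only): locating the Power Management
 * capability and reading PMCSR through the returned offset.  The
 * "example_" name is hypothetical.
 */
static inline u16 example_read_pmcsr(struct pci_dev *pdev)
{
	u16 pmcsr = 0;
	int pos = pci_find_capability(pdev, PCI_CAP_ID_PM);

	if (pos)
		pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
	return pmcsr;
}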

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
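
/*
 * Usage sketch (illustrative only): walking every instance of the
 * vendor-specific extended capability, the "find them all" pattern the
 * comment above describes.  The "example_" name is hypothetical.
 */
static inline int example_count_vsec(struct pci_dev *pdev)
{
	int pos = 0;
	int count = 0;

	while ((pos = pci_find_next_ext_capability(pdev, pos,
						   PCI_EXT_CAP_ID_VNDR)))
		count++;

	return count;
}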

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
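
/*
 * Usage sketch (illustrative only): iterating all matching Hypertransport
 * capabilities with an explicit iteration bound, as the NB above advises.
 * The "example_" name is hypothetical.
 */
static inline void example_walk_ht_caps(struct pci_dev *pdev, int ht_cap)
{
	int pos = pci_find_ht_capability(pdev, ht_cap);
	int guard = PCI_FIND_CAP_TTL;	/* bound against broken devices */

	while (pos && guard--) {
		/* ... inspect the capability at "pos" here ... */
		pos = pci_find_next_ht_capability(pdev, pos, ht_cap);
	}
}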

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure containing the resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them.  In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_find_pcie_root_port - return PCIe Root Port
 * @dev: PCI device to query
 *
 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 * for a given PCI Device.
 */
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
	struct pci_dev *bridge, *highest_pcie_bridge = NULL;

	bridge = pci_upstream_bridge(dev);
	while (bridge && pci_is_pcie(bridge)) {
		highest_pcie_bridge = bridge;
		bridge = pci_upstream_bridge(bridge);
	}

	/* Guard against devices with no PCIe upstream bridge at all */
	if (!highest_pcie_bridge ||
	    pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
		return NULL;

	return highest_pcie_bridge;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for the Transaction Pending bit(s) to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}
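
/*
 * Usage sketch (illustrative only): waiting for the Transaction Pending
 * bit in the PCIe Device Status register to clear, e.g. before a reset.
 * The "example_" name is hypothetical.
 */
static inline bool example_wait_pending_transaction(struct pci_dev *pdev)
{
	if (!pci_is_pcie(pdev))
		return true;

	return pci_wait_for_pending(pdev,
				    pci_pcie_cap(pdev) + PCI_EXP_DEVSTA,
				    PCI_EXP_DEVSTA_TRPND);
}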

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
	    !ops->choose_state || !ops->sleep_wake || !ops->run_wake ||
	    !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
		pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
		pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
		pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate current state: we can enter D0 from any state, but we
	 * can only go deeper into sleep states from a shallower one.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n",
			dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	    || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/*
	 * If we're (effectively) in D3, force the entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays;
	 * see PCI PM 1.1, section 5.6.1, table 18.
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, currently in D%d\n",
			 dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_set_power_state(dev, PCI_D0);

	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
static void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by the corresponding bridge,
		 * because we have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into the
			 * D0uninitialized state; resume the devices to
			 * give them a chance to suspend again.
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Powering off a bridge may power off the whole hierarchy below it */
	if (!ret && state == PCI_D3cold)
		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/*
	 * This device is quirked not to be put into D3, so
	 * don't put it in D3.
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put the device in D3cold, we put it into D3hot natively
	 * first, then put it into D3cold using platform ops.
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}
EXPORT_SYMBOL(pci_set_power_state);
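
/*
 * Usage sketch (illustrative only): the resume side of a driver pairing
 * pci_set_power_state() with pci_restore_state().  The "example_" name
 * is hypothetical.
 */
static inline int example_resume(struct pci_dev *pdev)
{
	int err = pci_set_power_state(pdev, PCI_D0);

	if (err)
		return err;

	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}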

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
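
/*
 * Usage sketch (illustrative only): a legacy suspend callback combining
 * pci_choose_state() with pci_set_power_state().  The "example_" name is
 * hypothetical.
 */
static inline int example_legacy_suspend(struct pci_dev *pdev,
					 pm_message_t mesg)
{
	pci_save_state(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
}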

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
							u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}


/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
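
/*
 * Usage sketch (illustrative only): a common pattern is to save config
 * space before an operation that may clobber it, such as a reset, and
 * restore it afterwards.  The "example_" name and the reset step are
 * hypothetical placeholders.
 */
static inline void example_save_around_reset(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	/* ... issue a device reset here ... */
	pci_restore_state(pdev);
}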

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);

	pci_cleanup_aer_error_status_regs(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
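
/*
 * Usage sketch (illustrative only): stashing an opaque copy of the saved
 * state and replaying it later, for callers that must survive a
 * state-destroying operation.  The "example_" name is hypothetical.
 */
static inline int example_stash_and_replay(struct pci_dev *pdev)
{
	struct pci_saved_state *saved;

	pci_save_state(pdev);
	saved = pci_store_saved_state(pdev);	/* kzalloc'ed copy or NULL */
	if (!saved)
		return -ENOMEM;

	/* ... state-destroying operation here ... */

	return pci_load_and_free_saved_state(pdev, &saved);
}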

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code; write a proper resume handler and use it
 * instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* walk the resources, skipping only the SR-IOV ones */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver.  Ask low-level code
 * to enable I/O resources.  Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver.  Ask low-level code
 * to enable Memory resources.  Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver.  Ask low-level code
 * to enable I/O and memory.  Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);
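
/*
 * Usage sketch (illustrative only): the canonical unmanaged probe
 * sequence built on pci_enable_device().  The "example_" name and the
 * region name string are hypothetical.
 */
static inline int example_probe(struct pci_dev *pdev)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;

	err = pci_request_regions(pdev, "example-driver");
	if (err) {
		pci_disable_device(pdev);
		return err;
	}

	pci_set_master(pdev);	/* enable bus mastering for DMA */
	return 0;
}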

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);
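
/*
 * Usage sketch (illustrative only): a managed probe needs no matching
 * cleanup, since devres disables the device automatically on driver
 * detach.  The "example_" name is hypothetical.
 */
static inline int example_managed_probe(struct pci_dev *pdev)
{
	int err = pcim_enable_device(pdev);

	if (err)
		return err;

	/* no pci_disable_device() needed in the error or remove paths */
	pci_set_master(pdev);
	return 0;
}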

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

/*
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added.  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released.  This is the default implementation.  Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device.  This
 * is the default implementation.  Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture-specific functionality when
 * penalizing ISA IRQs.  This is the default implementation.  Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);
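
/*
 * Usage sketch (illustrative only): the remove-path counterpart of the
 * unmanaged probe above, undoing each step in reverse order.  The
 * "example_" name is hypothetical.
 */
static inline void example_remove(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);	/* drops the enable refcount */
}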

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device.  This is the default
 * implementation.  Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}


/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);
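
/*
 * Usage sketch (illustrative only): enabling PME# for wakeup only after
 * verifying the device can signal it from the target state, as the
 * pci_pme_active() comment below requires.  The "example_" name is
 * hypothetical.
 */
static inline void example_arm_wakeup(struct pci_dev *pdev)
{
	if (pci_pme_capable(pdev, PCI_D3hot))
		pci_pme_active(pdev, true);
}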
1760
pci_pme_list_scan(struct work_struct * work)1761 static void pci_pme_list_scan(struct work_struct *work)
1762 {
1763 struct pci_pme_device *pme_dev, *n;
1764
1765 mutex_lock(&pci_pme_list_mutex);
1766 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1767 if (pme_dev->dev->pme_poll) {
1768 struct pci_dev *bridge;
1769
1770 bridge = pme_dev->dev->bus->self;
1771 /*
1772 * If the bridge is in a low-power state, the
1773 * configuration space of subordinate devices
1774 * may not be accessible.
1775 */
1776 if (bridge && bridge->current_state != PCI_D0)
1777 continue;
1778 pci_pme_wakeup(pme_dev->dev, NULL);
1779 } else {
1780 list_del(&pme_dev->list);
1781 kfree(pme_dev);
1782 }
1783 }
1784 if (!list_empty(&pci_pme_list))
1785 queue_delayed_work(system_freezable_wq, &pci_pme_work,
1786 msecs_to_jiffies(PME_TIMEOUT));
1787 mutex_unlock(&pci_pme_list_mutex);
1788 }
1789
1790 static void __pci_pme_active(struct pci_dev *dev, bool enable)
1791 {
1792 u16 pmcsr;
1793
1794 if (!dev->pme_support)
1795 return;
1796
1797 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1798 /* Clear PME_Status by writing 1 to it and enable PME# */
1799 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1800 if (!enable)
1801 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1802
1803 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1804 }
1805
1806 /**
1807 * pci_pme_active - enable or disable PCI device's PME# function
1808 * @dev: PCI device to handle.
1809 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1810 *
1811 * The caller must verify that the device is capable of generating PME# before
1812 * calling this function with @enable equal to 'true'.
1813 */
1814 void pci_pme_active(struct pci_dev *dev, bool enable)
1815 {
1816 __pci_pme_active(dev, enable);
1817
1818 /*
1819 * PCI (as opposed to PCIe) PME requires that the device have
1820 * its PME# line hooked up correctly. Not all hardware vendors
1821 * do this, so the PME never gets delivered and the device
1822 * remains asleep. The easiest way around this is to
1823 * periodically walk the list of suspended devices and check
1824 * whether any have their PME flag set. The assumption is that
1825 * we'll wake up often enough anyway that this won't be a huge
1826 * hit, and the power savings from the devices will still be a
1827 * win.
1828 *
1829 * Although PCIe uses an in-band PME message instead of the PME# line
1830 * to report PME, PME does not work for some PCIe devices in
1831 * reality. For example, there are devices that set their PME
1832 * status bits, but don't really bother to send a PME message;
1833 * there are PCI Express Root Ports that don't bother to
1834 * trigger interrupts when they receive PME messages from the
1835 * devices below. So PME poll is used for PCIe devices too.
1836 */
1837
1838 if (dev->pme_poll) {
1839 struct pci_pme_device *pme_dev;
1840 if (enable) {
1841 pme_dev = kmalloc(sizeof(struct pci_pme_device),
1842 GFP_KERNEL);
1843 if (!pme_dev) {
1844 dev_warn(&dev->dev, "can't enable PME#\n");
1845 return;
1846 }
1847 pme_dev->dev = dev;
1848 mutex_lock(&pci_pme_list_mutex);
1849 list_add(&pme_dev->list, &pci_pme_list);
1850 if (list_is_singular(&pci_pme_list))
1851 queue_delayed_work(system_freezable_wq,
1852 &pci_pme_work,
1853 msecs_to_jiffies(PME_TIMEOUT));
1854 mutex_unlock(&pci_pme_list_mutex);
1855 } else {
1856 mutex_lock(&pci_pme_list_mutex);
1857 list_for_each_entry(pme_dev, &pci_pme_list, list) {
1858 if (pme_dev->dev == dev) {
1859 list_del(&pme_dev->list);
1860 kfree(pme_dev);
1861 break;
1862 }
1863 }
1864 mutex_unlock(&pci_pme_list_mutex);
1865 }
1866 }
1867
1868 dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
1869 }
1870 EXPORT_SYMBOL(pci_pme_active);
1871
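/*
 * A minimal usage sketch (illustrative, not part of this file): "pdev"
 * is assumed to be a bound struct pci_dev. The capability check below
 * is the one the kernel-doc above requires before enabling PME#:
 *
 *    if (pci_pme_capable(pdev, PCI_D3hot))
 *        pci_pme_active(pdev, true);
 */
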
1872 /**
1873 * __pci_enable_wake - enable PCI device as wakeup event source
1874 * @dev: PCI device affected
1875 * @state: PCI state from which device will issue wakeup events
1876 * @runtime: True if the events are to be generated at run time
1877 * @enable: True to enable event generation; false to disable
1878 *
1879 * This enables the device as a wakeup event source, or disables it.
1880 * When such events involve platform-specific hooks, those hooks are
1881 * called automatically by this routine.
1882 *
1883 * Devices with legacy power management (no standard PCI PM capabilities)
1884 * always require such platform hooks.
1885 *
1886 * RETURN VALUE:
1887 * 0 is returned on success
1888 * -EINVAL is returned if the device is not supposed to wake up the system
1889 * Error code depending on the platform is returned if both the platform and
1890 * the native mechanism fail to enable the generation of wake-up events
1891 */
1892 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1893 bool runtime, bool enable)
1894 {
1895 int ret = 0;
1896
1897 if (enable && !runtime && !device_may_wakeup(&dev->dev))
1898 return -EINVAL;
1899
1900 /* Don't do the same thing twice in a row for one device. */
1901 if (!!enable == !!dev->wakeup_prepared)
1902 return 0;
1903
1904 /*
1905 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1906 * Anderson we should be doing PME# wake enable followed by ACPI wake
1907 * enable. To disable wake-up we call the platform first, for symmetry.
1908 */
1909
1910 if (enable) {
1911 int error;
1912
1913 if (pci_pme_capable(dev, state))
1914 pci_pme_active(dev, true);
1915 else
1916 ret = 1;
1917 error = runtime ? platform_pci_run_wake(dev, true) :
1918 platform_pci_sleep_wake(dev, true);
1919 if (ret)
1920 ret = error;
1921 if (!ret)
1922 dev->wakeup_prepared = true;
1923 } else {
1924 if (runtime)
1925 platform_pci_run_wake(dev, false);
1926 else
1927 platform_pci_sleep_wake(dev, false);
1928 pci_pme_active(dev, false);
1929 dev->wakeup_prepared = false;
1930 }
1931
1932 return ret;
1933 }
1934 EXPORT_SYMBOL(__pci_enable_wake);
1935
1936 /**
1937 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1938 * @dev: PCI device to prepare
1939 * @enable: True to enable wake-up event generation; false to disable
1940 *
1941 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1942 * and this function allows them to set that up cleanly - pci_enable_wake()
1943 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1944 * ordering constraints.
1945 *
1946 * This function only returns an error code if the device is not capable of
1947 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1948 * enable wake-up power for it.
1949 */
1950 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1951 {
1952 return pci_pme_capable(dev, PCI_D3cold) ?
1953 pci_enable_wake(dev, PCI_D3cold, enable) :
1954 pci_enable_wake(dev, PCI_D3hot, enable);
1955 }
1956 EXPORT_SYMBOL(pci_wake_from_d3);
1957
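/*
 * A minimal sketch of a legacy .suspend() hook arming D3 wake-up
 * (hypothetical driver code, not part of this file; foo_suspend() and
 * the choice of PCI_D3hot are assumptions for the example):
 *
 *    static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *    {
 *        pci_save_state(pdev);
 *        pci_wake_from_d3(pdev, true);
 *        return pci_set_power_state(pdev, PCI_D3hot);
 *    }
 */
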
1958 /**
1959 * pci_target_state - find an appropriate low power state for a given PCI dev
1960 * @dev: PCI device
1961 *
1962 * Use underlying platform code to find a supported low power state for @dev.
1963 * If the platform can't manage @dev, return the deepest state from which it
1964 * can generate wake events, based on any available PME info.
1965 */
1966 static pci_power_t pci_target_state(struct pci_dev *dev)
1967 {
1968 pci_power_t target_state = PCI_D3hot;
1969
1970 if (platform_pci_power_manageable(dev)) {
1971 /*
1972 * Call the platform to choose the target state of the device
1973 * and enable wake-up from this state if supported.
1974 */
1975 pci_power_t state = platform_pci_choose_state(dev);
1976
1977 switch (state) {
1978 case PCI_POWER_ERROR:
1979 case PCI_UNKNOWN:
1980 break;
1981 case PCI_D1:
1982 case PCI_D2:
1983 if (pci_no_d1d2(dev))
1984 break;
1985 default:
1986 target_state = state;
1987 }
1988
1989 return target_state;
1990 }
1991
1992 if (!dev->pm_cap)
1993 target_state = PCI_D0;
1994
1995 /*
1996 * If the device is in D3cold even though it's not power-manageable by
1997 * the platform, it may have been powered down by non-standard means.
1998 * Best to let it slumber.
1999 */
2000 if (dev->current_state == PCI_D3cold)
2001 target_state = PCI_D3cold;
2002
2003 if (device_may_wakeup(&dev->dev)) {
2004 /*
2005 * Find the deepest state from which the device can generate
2006 * wake-up events, make it the target state and enable device
2007 * to generate PME#.
2008 */
2009 if (dev->pme_support) {
2010 while (target_state
2011 && !(dev->pme_support & (1 << target_state)))
2012 target_state--;
2013 }
2014 }
2015
2016 return target_state;
2017 }
2018
2019 /**
2020 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
2021 * @dev: Device to handle.
2022 *
2023 * Choose the power state appropriate for the device depending on whether
2024 * it can wake up the system and/or is power manageable by the platform
2025 * (PCI_D3hot is the default) and put the device into that state.
2026 */
2027 int pci_prepare_to_sleep(struct pci_dev *dev)
2028 {
2029 pci_power_t target_state = pci_target_state(dev);
2030 int error;
2031
2032 if (target_state == PCI_POWER_ERROR)
2033 return -EIO;
2034
2035 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
2036
2037 error = pci_set_power_state(dev, target_state);
2038
2039 if (error)
2040 pci_enable_wake(dev, target_state, false);
2041
2042 return error;
2043 }
2044 EXPORT_SYMBOL(pci_prepare_to_sleep);
2045
2046 /**
2047 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
2048 * @dev: Device to handle.
2049 *
2050 * Disable device's system wake-up capability and put it into D0.
2051 */
2052 int pci_back_from_sleep(struct pci_dev *dev)
2053 {
2054 pci_enable_wake(dev, PCI_D0, false);
2055 return pci_set_power_state(dev, PCI_D0);
2056 }
2057 EXPORT_SYMBOL(pci_back_from_sleep);
2058
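/*
 * Illustrative pairing (not part of this file): code that put a device
 * to sleep with pci_prepare_to_sleep() typically undoes it on resume
 * with pci_back_from_sleep(), which disables wake-up and returns to D0:
 *
 *    pci_prepare_to_sleep(pdev);    suspend path
 *    ...
 *    pci_back_from_sleep(pdev);     resume path
 */
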
2059 /**
2060 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2061 * @dev: PCI device being suspended.
2062 *
2063 * Prepare @dev to generate wake-up events at run time and put it into a low
2064 * power state.
2065 */
2066 int pci_finish_runtime_suspend(struct pci_dev *dev)
2067 {
2068 pci_power_t target_state = pci_target_state(dev);
2069 int error;
2070
2071 if (target_state == PCI_POWER_ERROR)
2072 return -EIO;
2073
2074 dev->runtime_d3cold = target_state == PCI_D3cold;
2075
2076 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
2077
2078 error = pci_set_power_state(dev, target_state);
2079
2080 if (error) {
2081 __pci_enable_wake(dev, target_state, true, false);
2082 dev->runtime_d3cold = false;
2083 }
2084
2085 return error;
2086 }
2087
2088 /**
2089 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2090 * @dev: Device to check.
2091 *
2092 * Return true if the device itself is capable of generating wake-up events
2093 * (through the platform or using the native PCIe PME) or if the device supports
2094 * PME and one of its upstream bridges can generate wake-up events.
2095 */
2096 bool pci_dev_run_wake(struct pci_dev *dev)
2097 {
2098 struct pci_bus *bus = dev->bus;
2099
2100 if (device_run_wake(&dev->dev))
2101 return true;
2102
2103 if (!dev->pme_support)
2104 return false;
2105
2106 /* PME-capable in principle, but not from the intended sleep state */
2107 if (!pci_pme_capable(dev, pci_target_state(dev)))
2108 return false;
2109
2110 while (bus->parent) {
2111 struct pci_dev *bridge = bus->self;
2112
2113 if (device_run_wake(&bridge->dev))
2114 return true;
2115
2116 bus = bus->parent;
2117 }
2118
2119 /* We have reached the root bus. */
2120 if (bus->bridge)
2121 return device_run_wake(bus->bridge);
2122
2123 return false;
2124 }
2125 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2126
2127 /**
2128 * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
2129 * @pci_dev: Device to check.
2130 *
2131 * Return 'true' if the device is runtime-suspended, does not have to be
2132 * reconfigured due to wakeup settings differences between system and runtime
2133 * suspend, and its current power state is suitable for the upcoming
2134 * (system) transition.
2135 *
2136 * If the device is not configured for system wakeup, disable PME for it before
2137 * returning 'true' to prevent it from waking up the system unnecessarily.
2138 */
2139 bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2140 {
2141 struct device *dev = &pci_dev->dev;
2142
2143 if (!pm_runtime_suspended(dev)
2144 || pci_target_state(pci_dev) != pci_dev->current_state
2145 || platform_pci_need_resume(pci_dev)
2146 || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
2147 return false;
2148
2149 /*
2150 * At this point the device is good to go unless it's been configured
2151 * to generate PME while runtime-suspended even though it is not supposed
2152 * to wake up the system. In that case, simply disable PME for it
2153 * (it will have to be re-enabled on exit from system resume).
2154 *
2155 * If the device's power state is D3cold and the platform check above
2156 * hasn't triggered, the device's configuration is suitable and we don't
2157 * need to manipulate it at all.
2158 */
2159 spin_lock_irq(&dev->power.lock);
2160
2161 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
2162 !device_may_wakeup(dev))
2163 __pci_pme_active(pci_dev, false);
2164
2165 spin_unlock_irq(&dev->power.lock);
2166 return true;
2167 }
2168
2169 /**
2170 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2171 * @pci_dev: Device to handle.
2172 *
2173 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2174 * it might have been disabled during the prepare phase of system suspend if
2175 * the device was not configured for system wakeup.
2176 */
2177 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2178 {
2179 struct device *dev = &pci_dev->dev;
2180
2181 if (!pci_dev_run_wake(pci_dev))
2182 return;
2183
2184 spin_lock_irq(&dev->power.lock);
2185
2186 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2187 __pci_pme_active(pci_dev, true);
2188
2189 spin_unlock_irq(&dev->power.lock);
2190 }
2191
2192 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2193 {
2194 struct device *dev = &pdev->dev;
2195 struct device *parent = dev->parent;
2196
2197 if (parent)
2198 pm_runtime_get_sync(parent);
2199 pm_runtime_get_noresume(dev);
2200 /*
2201 * pdev->current_state is set to PCI_D3cold during suspending,
2202 * so wait until suspending completes
2203 */
2204 pm_runtime_barrier(dev);
2205 /*
2206 * Only need to resume devices in D3cold, because config
2207 * registers are still accessible for devices suspended but
2208 * not in D3cold.
2209 */
2210 if (pdev->current_state == PCI_D3cold)
2211 pm_runtime_resume(dev);
2212 }
2213
2214 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2215 {
2216 struct device *dev = &pdev->dev;
2217 struct device *parent = dev->parent;
2218
2219 pm_runtime_put(dev);
2220 if (parent)
2221 pm_runtime_put_sync(parent);
2222 }
2223
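/*
 * Illustrative bracket (not part of this file): config space accessors,
 * e.g. the sysfs config read path, wrap accesses with these helpers so a
 * device runtime-suspended into D3cold is resumed first:
 *
 *    u16 vendor;
 *
 *    pci_config_pm_runtime_get(pdev);
 *    pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
 *    pci_config_pm_runtime_put(pdev);
 */
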
2224 /**
2225 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2226 * @bridge: Bridge to check
2227 *
2228 * This function checks if it is possible to move the bridge to D3.
2229 * Currently we only allow D3 for recent enough PCIe ports.
2230 */
2231 static bool pci_bridge_d3_possible(struct pci_dev *bridge)
2232 {
2233 unsigned int year;
2234
2235 if (!pci_is_pcie(bridge))
2236 return false;
2237
2238 switch (pci_pcie_type(bridge)) {
2239 case PCI_EXP_TYPE_ROOT_PORT:
2240 case PCI_EXP_TYPE_UPSTREAM:
2241 case PCI_EXP_TYPE_DOWNSTREAM:
2242 if (pci_bridge_d3_disable)
2243 return false;
2244 if (pci_bridge_d3_force)
2245 return true;
2246
2247 /*
2248 * It should be safe to put PCIe ports from 2015 or newer
2249 * to D3.
2250 */
2251 if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
2252 year >= 2015) {
2253 return true;
2254 }
2255 break;
2256 }
2257
2258 return false;
2259 }
2260
2261 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2262 {
2263 bool *d3cold_ok = data;
2264 bool no_d3cold;
2265
2266 /*
2267 * The device needs to be allowed to go to D3cold and, if it is
2268 * wake-capable, to generate wake-up events from D3cold.
2269 */
2270 no_d3cold = dev->no_d3cold || !dev->d3cold_allowed ||
2271 (device_may_wakeup(&dev->dev) && !pci_pme_capable(dev, PCI_D3cold)) ||
2272 !pci_power_manageable(dev);
2273
2274 *d3cold_ok = !no_d3cold;
2275
2276 return no_d3cold;
2277 }
2278
2279 /*
2280 * pci_bridge_d3_update - Update bridge D3 capabilities
2281 * @dev: PCI device which is changed
2282 * @remove: Is the device being removed
2283 *
2284 * Update upstream bridge PM capabilities depending on whether the
2285 * device's PM configuration was changed or the device is being removed. The
2286 * change is also propagated upstream.
2287 */
2288 static void pci_bridge_d3_update(struct pci_dev *dev, bool remove)
2289 {
2290 struct pci_dev *bridge;
2291 bool d3cold_ok = true;
2292
2293 bridge = pci_upstream_bridge(dev);
2294 if (!bridge || !pci_bridge_d3_possible(bridge))
2295 return;
2296
2297 pci_dev_get(bridge);
2298 /*
2299 * If the device is removed we do not care about its D3cold
2300 * capabilities.
2301 */
2302 if (!remove)
2303 pci_dev_check_d3cold(dev, &d3cold_ok);
2304
2305 if (d3cold_ok) {
2306 /*
2307 * We need to go through all children to find out if all of
2308 * them can still go to D3cold.
2309 */
2310 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2311 &d3cold_ok);
2312 }
2313
2314 if (bridge->bridge_d3 != d3cold_ok) {
2315 bridge->bridge_d3 = d3cold_ok;
2316 /* Propagate change to upstream bridges */
2317 pci_bridge_d3_update(bridge, false);
2318 }
2319
2320 pci_dev_put(bridge);
2321 }
2322
2323 /**
2324 * pci_bridge_d3_device_changed - Update bridge D3 capabilities on change
2325 * @dev: PCI device that was changed
2326 *
2327 * If a device is added or its PM configuration, such as whether it is
2328 * allowed to enter D3cold, is changed, this function updates upstream bridge PM
2329 * capabilities accordingly.
2330 */
2331 void pci_bridge_d3_device_changed(struct pci_dev *dev)
2332 {
2333 pci_bridge_d3_update(dev, false);
2334 }
2335
2336 /**
2337 * pci_bridge_d3_device_removed - Update bridge D3 capabilities on remove
2338 * @dev: PCI device being removed
2339 *
2340 * This function updates upstream bridge PM capabilities based on the other
2341 * devices still left on the bus.
2342 */
2343 void pci_bridge_d3_device_removed(struct pci_dev *dev)
2344 {
2345 pci_bridge_d3_update(dev, true);
2346 }
2347
2348 /**
2349 * pci_d3cold_enable - Enable D3cold for device
2350 * @dev: PCI device to handle
2351 *
2352 * This function can be used in drivers to enable D3cold for the device
2353 * they handle. It also updates upstream PCI bridge PM capabilities
2354 * accordingly.
2355 */
2356 void pci_d3cold_enable(struct pci_dev *dev)
2357 {
2358 if (dev->no_d3cold) {
2359 dev->no_d3cold = false;
2360 pci_bridge_d3_device_changed(dev);
2361 }
2362 }
2363 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2364
2365 /**
2366 * pci_d3cold_disable - Disable D3cold for device
2367 * @dev: PCI device to handle
2368 *
2369 * This function can be used in drivers to disable D3cold for the device
2370 * they handle. It also updates upstream PCI bridge PM capabilities
2371 * accordingly.
2372 */
2373 void pci_d3cold_disable(struct pci_dev *dev)
2374 {
2375 if (!dev->no_d3cold) {
2376 dev->no_d3cold = true;
2377 pci_bridge_d3_device_changed(dev);
2378 }
2379 }
2380 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
2381
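/*
 * A minimal sketch (hypothetical driver code, not part of this file):
 * a driver whose device does not survive D3cold can opt out at probe
 * time; foo_probe() is an assumption for the example:
 *
 *    static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *    {
 *        pci_d3cold_disable(pdev);
 *        ...
 *    }
 */
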
2382 /**
2383 * pci_pm_init - Initialize PM functions of given PCI device
2384 * @dev: PCI device to handle.
2385 */
2386 void pci_pm_init(struct pci_dev *dev)
2387 {
2388 int pm;
2389 u16 pmc;
2390
2391 pm_runtime_forbid(&dev->dev);
2392 pm_runtime_set_active(&dev->dev);
2393 pm_runtime_enable(&dev->dev);
2394 device_enable_async_suspend(&dev->dev);
2395 dev->wakeup_prepared = false;
2396
2397 dev->pm_cap = 0;
2398 dev->pme_support = 0;
2399
2400 /* find PCI PM capability in list */
2401 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2402 if (!pm)
2403 return;
2404 /* Check device's ability to generate PME# */
2405 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2406
2407 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2408 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
2409 pmc & PCI_PM_CAP_VER_MASK);
2410 return;
2411 }
2412
2413 dev->pm_cap = pm;
2414 dev->d3_delay = PCI_PM_D3_WAIT;
2415 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
2416 dev->bridge_d3 = pci_bridge_d3_possible(dev);
2417 dev->d3cold_allowed = true;
2418
2419 dev->d1_support = false;
2420 dev->d2_support = false;
2421 if (!pci_no_d1d2(dev)) {
2422 if (pmc & PCI_PM_CAP_D1)
2423 dev->d1_support = true;
2424 if (pmc & PCI_PM_CAP_D2)
2425 dev->d2_support = true;
2426
2427 if (dev->d1_support || dev->d2_support)
2428 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
2429 dev->d1_support ? " D1" : "",
2430 dev->d2_support ? " D2" : "");
2431 }
2432
2433 pmc &= PCI_PM_CAP_PME_MASK;
2434 if (pmc) {
2435 dev_printk(KERN_DEBUG, &dev->dev,
2436 "PME# supported from%s%s%s%s%s\n",
2437 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2438 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2439 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2440 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2441 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
2442 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
2443 dev->pme_poll = true;
2444 /*
2445 * Make device's PM flags reflect the wake-up capability, but
2446 * let user space enable it to wake up the system as needed.
2447 */
2448 device_set_wakeup_capable(&dev->dev, true);
2449 /* Disable the PME# generation functionality */
2450 pci_pme_active(dev, false);
2451 }
2452 }
2453
2454 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
2455 {
2456 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
2457
2458 switch (prop) {
2459 case PCI_EA_P_MEM:
2460 case PCI_EA_P_VF_MEM:
2461 flags |= IORESOURCE_MEM;
2462 break;
2463 case PCI_EA_P_MEM_PREFETCH:
2464 case PCI_EA_P_VF_MEM_PREFETCH:
2465 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
2466 break;
2467 case PCI_EA_P_IO:
2468 flags |= IORESOURCE_IO;
2469 break;
2470 default:
2471 return 0;
2472 }
2473
2474 return flags;
2475 }
2476
2477 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
2478 u8 prop)
2479 {
2480 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
2481 return &dev->resource[bei];
2482 #ifdef CONFIG_PCI_IOV
2483 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
2484 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
2485 return &dev->resource[PCI_IOV_RESOURCES +
2486 bei - PCI_EA_BEI_VF_BAR0];
2487 #endif
2488 else if (bei == PCI_EA_BEI_ROM)
2489 return &dev->resource[PCI_ROM_RESOURCE];
2490 else
2491 return NULL;
2492 }
2493
2494 /* Read an Enhanced Allocation (EA) entry */
2495 static int pci_ea_read(struct pci_dev *dev, int offset)
2496 {
2497 struct resource *res;
2498 int ent_size, ent_offset = offset;
2499 resource_size_t start, end;
2500 unsigned long flags;
2501 u32 dw0, bei, base, max_offset;
2502 u8 prop;
2503 bool support_64 = (sizeof(resource_size_t) >= 8);
2504
2505 pci_read_config_dword(dev, ent_offset, &dw0);
2506 ent_offset += 4;
2507
2508 /* Entry size field indicates DWORDs after 1st */
2509 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
2510
2511 if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
2512 goto out;
2513
2514 bei = (dw0 & PCI_EA_BEI) >> 4;
2515 prop = (dw0 & PCI_EA_PP) >> 8;
2516
2517 /*
2518 * If the Property is in the reserved range, try the Secondary
2519 * Property instead.
2520 */
2521 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
2522 prop = (dw0 & PCI_EA_SP) >> 16;
2523 if (prop > PCI_EA_P_BRIDGE_IO)
2524 goto out;
2525
2526 res = pci_ea_get_resource(dev, bei, prop);
2527 if (!res) {
2528 dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei);
2529 goto out;
2530 }
2531
2532 flags = pci_ea_flags(dev, prop);
2533 if (!flags) {
2534 dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop);
2535 goto out;
2536 }
2537
2538 /* Read Base */
2539 pci_read_config_dword(dev, ent_offset, &base);
2540 start = (base & PCI_EA_FIELD_MASK);
2541 ent_offset += 4;
2542
2543 /* Read MaxOffset */
2544 pci_read_config_dword(dev, ent_offset, &max_offset);
2545 ent_offset += 4;
2546
2547 /* Read Base MSBs (if 64-bit entry) */
2548 if (base & PCI_EA_IS_64) {
2549 u32 base_upper;
2550
2551 pci_read_config_dword(dev, ent_offset, &base_upper);
2552 ent_offset += 4;
2553
2554 flags |= IORESOURCE_MEM_64;
2555
2556 /* entry starts above 32-bit boundary, can't use */
2557 if (!support_64 && base_upper)
2558 goto out;
2559
2560 if (support_64)
2561 start |= ((u64)base_upper << 32);
2562 }
2563
2564 end = start + (max_offset | 0x03);
2565
2566 /* Read MaxOffset MSBs (if 64-bit entry) */
2567 if (max_offset & PCI_EA_IS_64) {
2568 u32 max_offset_upper;
2569
2570 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
2571 ent_offset += 4;
2572
2573 flags |= IORESOURCE_MEM_64;
2574
2575 /* entry too big, can't use */
2576 if (!support_64 && max_offset_upper)
2577 goto out;
2578
2579 if (support_64)
2580 end += ((u64)max_offset_upper << 32);
2581 }
2582
2583 if (end < start) {
2584 dev_err(&dev->dev, "EA Entry crosses address boundary\n");
2585 goto out;
2586 }
2587
2588 if (ent_size != ent_offset - offset) {
2589 dev_err(&dev->dev,
2590 "EA Entry Size (%d) does not match length read (%d)\n",
2591 ent_size, ent_offset - offset);
2592 goto out;
2593 }
2594
2595 res->name = pci_name(dev);
2596 res->start = start;
2597 res->end = end;
2598 res->flags = flags;
2599
2600 if (bei <= PCI_EA_BEI_BAR5)
2601 dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2602 bei, res, prop);
2603 else if (bei == PCI_EA_BEI_ROM)
2604 dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
2605 res, prop);
2606 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
2607 dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2608 bei - PCI_EA_BEI_VF_BAR0, res, prop);
2609 else
2610 dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
2611 bei, res, prop);
2612
2613 out:
2614 return offset + ent_size;
2615 }
2616
2617 /* Enhanced Allocation Initialization */
2618 void pci_ea_init(struct pci_dev *dev)
2619 {
2620 int ea;
2621 u8 num_ent;
2622 int offset;
2623 int i;
2624
2625 /* find PCI EA capability in list */
2626 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
2627 if (!ea)
2628 return;
2629
2630 /* determine the number of entries */
2631 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
2632 &num_ent);
2633 num_ent &= PCI_EA_NUM_ENT_MASK;
2634
2635 offset = ea + PCI_EA_FIRST_ENT;
2636
2637 /* Skip DWORD 2 for type 1 functions */
2638 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
2639 offset += 4;
2640
2641 /* parse each EA entry */
2642 for (i = 0; i < num_ent; ++i)
2643 offset = pci_ea_read(dev, offset);
2644 }
2645
2646 static void pci_add_saved_cap(struct pci_dev *pci_dev,
2647 struct pci_cap_saved_state *new_cap)
2648 {
2649 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
2650 }
2651
2652 /**
2653 * _pci_add_cap_save_buffer - allocate buffer for saving given
2654 * capability registers
2655 * @dev: the PCI device
2656 * @cap: the capability to allocate the buffer for
2657 * @extended: 'true' for an Extended Capability ID, 'false' for a standard one
2658 * @size: requested size of the buffer
2659 */
2660 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2661 bool extended, unsigned int size)
2662 {
2663 int pos;
2664 struct pci_cap_saved_state *save_state;
2665
2666 if (extended)
2667 pos = pci_find_ext_capability(dev, cap);
2668 else
2669 pos = pci_find_capability(dev, cap);
2670
2671 if (!pos)
2672 return 0;
2673
2674 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2675 if (!save_state)
2676 return -ENOMEM;
2677
2678 save_state->cap.cap_nr = cap;
2679 save_state->cap.cap_extended = extended;
2680 save_state->cap.size = size;
2681 pci_add_saved_cap(dev, save_state);
2682
2683 return 0;
2684 }
2685
2686 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2687 {
2688 return _pci_add_cap_save_buffer(dev, cap, false, size);
2689 }
2690
2691 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2692 {
2693 return _pci_add_cap_save_buffer(dev, cap, true, size);
2694 }
2695
2696 /**
2697 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2698 * @dev: the PCI device
2699 */
2700 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2701 {
2702 int error;
2703
2704 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2705 PCI_EXP_SAVE_REGS * sizeof(u16));
2706 if (error)
2707 dev_err(&dev->dev,
2708 "unable to preallocate PCI Express save buffer\n");
2709
2710 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2711 if (error)
2712 dev_err(&dev->dev,
2713 "unable to preallocate PCI-X save buffer\n");
2714
2715 pci_allocate_vc_save_buffers(dev);
2716 }
2717
2718 void pci_free_cap_save_buffers(struct pci_dev *dev)
2719 {
2720 struct pci_cap_saved_state *tmp;
2721 struct hlist_node *n;
2722
2723 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
2724 kfree(tmp);
2725 }
2726
2727 /**
2728 * pci_configure_ari - enable or disable ARI forwarding
2729 * @dev: the PCI device
2730 *
2731 * If @dev and its upstream bridge both support ARI, enable ARI in the
2732 * bridge. Otherwise, disable ARI in the bridge.
2733 */
2734 void pci_configure_ari(struct pci_dev *dev)
2735 {
2736 u32 cap;
2737 struct pci_dev *bridge;
2738
2739 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
2740 return;
2741
2742 bridge = dev->bus->self;
2743 if (!bridge)
2744 return;
2745
2746 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
2747 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2748 return;
2749
2750 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
2751 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
2752 PCI_EXP_DEVCTL2_ARI);
2753 bridge->ari_enabled = 1;
2754 } else {
2755 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
2756 PCI_EXP_DEVCTL2_ARI);
2757 bridge->ari_enabled = 0;
2758 }
2759 }
2760
2761 static int pci_acs_enable;
2762
2763 /**
2764 * pci_request_acs - ask for ACS to be enabled if supported
2765 */
2766 void pci_request_acs(void)
2767 {
2768 pci_acs_enable = 1;
2769 }
2770
2771 /**
2772 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
2773 * @dev: the PCI device
2774 */
2775 static void pci_std_enable_acs(struct pci_dev *dev)
2776 {
2777 int pos;
2778 u16 cap;
2779 u16 ctrl;
2780
2781 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2782 if (!pos)
2783 return;
2784
2785 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2786 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2787
2788 /* Source Validation */
2789 ctrl |= (cap & PCI_ACS_SV);
2790
2791 /* P2P Request Redirect */
2792 ctrl |= (cap & PCI_ACS_RR);
2793
2794 /* P2P Completion Redirect */
2795 ctrl |= (cap & PCI_ACS_CR);
2796
2797 /* Upstream Forwarding */
2798 ctrl |= (cap & PCI_ACS_UF);
2799
2800 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2801 }
2802
2803 /**
2804 * pci_enable_acs - enable ACS if hardware supports it
2805 * @dev: the PCI device
2806 */
2807 void pci_enable_acs(struct pci_dev *dev)
2808 {
2809 if (!pci_acs_enable)
2810 return;
2811
2812 if (!pci_dev_specific_enable_acs(dev))
2813 return;
2814
2815 pci_std_enable_acs(dev);
2816 }
2817
2818 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2819 {
2820 int pos;
2821 u16 cap, ctrl;
2822
2823 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2824 if (!pos)
2825 return false;
2826
2827 /*
2828 * Except for egress control, capabilities are either required
2829 * or only required if controllable. Features missing from the
2830 * capability field can therefore be assumed to be hard-wired enabled.
2831 */
2832 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2833 acs_flags &= (cap | PCI_ACS_EC);
2834
2835 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2836 return (ctrl & acs_flags) == acs_flags;
2837 }
2838
2839 /**
2840 * pci_acs_enabled - test ACS against required flags for a given device
2841 * @pdev: device to test
2842 * @acs_flags: required PCI ACS flags
2843 *
2844 * Return true if the device supports the provided flags. Automatically
2845 * filters out flags that are not implemented on multifunction devices.
2846 *
2847 * Note that this interface checks the effective ACS capabilities of the
2848 * device rather than the actual capabilities. For instance, most single
2849 * function endpoints are not required to support ACS because they have no
2850 * opportunity for peer-to-peer access. We therefore return 'true'
2851 * regardless of whether the device exposes an ACS capability. This makes
2852 * it much easier for callers of this function to ignore the actual type
2853 * or topology of the device when testing ACS support.
2854 */
2855 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2856 {
2857 int ret;
2858
2859 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2860 if (ret >= 0)
2861 return ret > 0;
2862
2863 /*
2864 * Conventional PCI and PCI-X devices never support ACS, either
2865 * effectively or actually. The shared bus topology implies that
2866 * any device on the bus can receive or snoop DMA.
2867 */
2868 if (!pci_is_pcie(pdev))
2869 return false;
2870
2871 switch (pci_pcie_type(pdev)) {
2872 /*
2873 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
2874 * but since their primary interface is PCI/X, we conservatively
2875 * handle them as we would a non-PCIe device.
2876 */
2877 case PCI_EXP_TYPE_PCIE_BRIDGE:
2878 /*
2879 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
2880 * applicable... must never implement an ACS Extended Capability...".
2881 * This seems arbitrary, but we take a conservative interpretation
2882 * of this statement.
2883 */
2884 case PCI_EXP_TYPE_PCI_BRIDGE:
2885 case PCI_EXP_TYPE_RC_EC:
2886 return false;
2887 /*
2888 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
2889 * implement ACS in order to indicate their peer-to-peer capabilities,
2890 * regardless of whether they are single- or multi-function devices.
2891 */
2892 case PCI_EXP_TYPE_DOWNSTREAM:
2893 case PCI_EXP_TYPE_ROOT_PORT:
2894 return pci_acs_flags_enabled(pdev, acs_flags);
2895 /*
2896 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
2897 * implemented by the remaining PCIe types to indicate peer-to-peer
2898 * capabilities, but only when they are part of a multifunction
2899 * device. The footnote for section 6.12 indicates the specific
2900 * PCIe types included here.
2901 */
2902 case PCI_EXP_TYPE_ENDPOINT:
2903 case PCI_EXP_TYPE_UPSTREAM:
2904 case PCI_EXP_TYPE_LEG_END:
2905 case PCI_EXP_TYPE_RC_END:
2906 if (!pdev->multifunction)
2907 break;
2908
2909 return pci_acs_flags_enabled(pdev, acs_flags);
2910 }
2911
2912 /*
2913 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
2914 * to single function devices with the exception of downstream ports.
2915 */
2916 return true;
2917 }
2918
2919 /**
2920 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
2921 * @start: starting downstream device
2922 * @end: ending upstream device or NULL to search to the root bus
2923 * @acs_flags: required flags
2924 *
2925 * Walk up a device tree from @start to @end testing PCI ACS support. If
2926 * any step along the way does not support the required flags, return false.
2927 */
2928 bool pci_acs_path_enabled(struct pci_dev *start,
2929 struct pci_dev *end, u16 acs_flags)
2930 {
2931 struct pci_dev *pdev, *parent = start;
2932
2933 do {
2934 pdev = parent;
2935
2936 if (!pci_acs_enabled(pdev, acs_flags))
2937 return false;
2938
2939 if (pci_is_root_bus(pdev->bus))
2940 return (end == NULL);
2941
2942 parent = pdev->bus->self;
2943 } while (pdev != end);
2944
2945 return true;
2946 }
2947
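/*
 * Illustrative sketch (not part of this file): callers such as IOMMU
 * grouping code ask whether peer-to-peer DMA from a device is redirected
 * upstream all the way to the root bus, using a mask along these lines:
 *
 *    u16 acs_flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;
 *
 *    if (pci_acs_path_enabled(pdev, NULL, acs_flags))
 *        ... pdev is isolated and may get its own IOMMU group ...
 */
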
2948 /**
2949 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2950 * @dev: the PCI device
2951 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2952 *
2953 * Perform INTx swizzling for a device behind one level of bridge. This is
2954 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2955 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2956 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2957 * the PCI Express Base Specification, Revision 2.1)
2958 */
2959 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2960 {
2961 int slot;
2962
2963 if (pci_ari_enabled(dev->bus))
2964 slot = 0;
2965 else
2966 slot = PCI_SLOT(dev->devfn);
2967
2968 return (((pin - 1) + slot) % 4) + 1;
2969 }
2970
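/*
 * Worked example: INTB (pin 2) from a device in slot 1 behind a single
 * bridge swizzles to (((2 - 1) + 1) % 4) + 1 = 3, i.e. INTC on the
 * bridge's primary side. With ARI enabled the slot term is always 0,
 * so the same pin would stay INTB.
 */
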
2971 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2972 {
2973 u8 pin;
2974
2975 pin = dev->pin;
2976 if (!pin)
2977 return -1;
2978
2979 while (!pci_is_root_bus(dev->bus)) {
2980 pin = pci_swizzle_interrupt_pin(dev, pin);
2981 dev = dev->bus->self;
2982 }
2983 *bridge = dev;
2984 return pin;
2985 }
2986
2987 /**
2988 * pci_common_swizzle - swizzle INTx all the way to root bridge
2989 * @dev: the PCI device
2990 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2991 *
2992 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2993 * bridges all the way up to a PCI root bus.
2994 */
2995 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2996 {
2997 u8 pin = *pinp;
2998
2999 while (!pci_is_root_bus(dev->bus)) {
3000 pin = pci_swizzle_interrupt_pin(dev, pin);
3001 dev = dev->bus->self;
3002 }
3003 *pinp = pin;
3004 return PCI_SLOT(dev->devfn);
3005 }
3006 EXPORT_SYMBOL_GPL(pci_common_swizzle);
3007
3008 /**
3009 * pci_release_region - Release a PCI BAR
3010 * @pdev: PCI device whose resources were previously reserved by pci_request_region
3011 * @bar: BAR to release
3012 *
3013 * Releases the PCI I/O and memory resources previously reserved by a
3014 * successful call to pci_request_region. Call this function only
3015 * after all use of the PCI regions has ceased.
3016 */
3017 void pci_release_region(struct pci_dev *pdev, int bar)
3018 {
3019 struct pci_devres *dr;
3020
3021 if (pci_resource_len(pdev, bar) == 0)
3022 return;
3023 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3024 release_region(pci_resource_start(pdev, bar),
3025 pci_resource_len(pdev, bar));
3026 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3027 release_mem_region(pci_resource_start(pdev, bar),
3028 pci_resource_len(pdev, bar));
3029
3030 dr = find_pci_dr(pdev);
3031 if (dr)
3032 dr->region_mask &= ~(1 << bar);
3033 }
3034 EXPORT_SYMBOL(pci_release_region);
3035
3036 /**
3037 * __pci_request_region - Reserve PCI I/O and memory resource
3038 * @pdev: PCI device whose resources are to be reserved
3039 * @bar: BAR to be reserved
3040 * @res_name: Name to be associated with resource.
3041 * @exclusive: whether the region access is exclusive or not
3042 *
3043 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3044 * being reserved by owner @res_name. Do not access any
3045 * address inside the PCI regions unless this call returns
3046 * successfully.
3047 *
3048 * If @exclusive is set, then the region is marked so that userspace
3049 * is explicitly not allowed to map the resource via /dev/mem or
3050 * sysfs MMIO access.
3051 *
3052 * Returns 0 on success, or %EBUSY on error. A warning
3053 * message is also printed on failure.
3054 */
3055 static int __pci_request_region(struct pci_dev *pdev, int bar,
3056 const char *res_name, int exclusive)
3057 {
3058 struct pci_devres *dr;
3059
3060 if (pci_resource_len(pdev, bar) == 0)
3061 return 0;
3062
3063 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3064 if (!request_region(pci_resource_start(pdev, bar),
3065 pci_resource_len(pdev, bar), res_name))
3066 goto err_out;
3067 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3068 if (!__request_mem_region(pci_resource_start(pdev, bar),
3069 pci_resource_len(pdev, bar), res_name,
3070 exclusive))
3071 goto err_out;
3072 }
3073
3074 dr = find_pci_dr(pdev);
3075 if (dr)
3076 dr->region_mask |= 1 << bar;
3077
3078 return 0;
3079
3080 err_out:
3081 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
3082 &pdev->resource[bar]);
3083 return -EBUSY;
3084 }
3085
3086 /**
3087 * pci_request_region - Reserve PCI I/O and memory resource
3088 * @pdev: PCI device whose resources are to be reserved
3089 * @bar: BAR to be reserved
3090 * @res_name: Name to be associated with resource
3091 *
3092 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3093 * being reserved by owner @res_name. Do not access any
3094 * address inside the PCI regions unless this call returns
3095 * successfully.
3096 *
3097 * Returns 0 on success, or %EBUSY on error. A warning
3098 * message is also printed on failure.
3099 */
3100 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3101 {
3102 return __pci_request_region(pdev, bar, res_name, 0);
3103 }
3104 EXPORT_SYMBOL(pci_request_region);
3105
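/*
 * A typical probe-time sequence around a single BAR (hypothetical
 * driver code, not part of this file; foo_probe() and the use of BAR 0
 * are assumptions for the example):
 *
 *    static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *    {
 *        int rc;
 *
 *        rc = pci_enable_device(pdev);
 *        if (rc)
 *            return rc;
 *
 *        rc = pci_request_region(pdev, 0, "foo");
 *        if (rc) {
 *            pci_disable_device(pdev);
 *            return rc;
 *        }
 *        ...
 *    }
 */
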
3106 /**
3107 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
3108 * @pdev: PCI device whose resources are to be reserved
3109 * @bar: BAR to be reserved
3110 * @res_name: Name to be associated with resource.
3111 *
3112 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3113 * being reserved by owner @res_name. Do not access any
3114 * address inside the PCI regions unless this call returns
3115 * successfully.
3116 *
3117 * Returns 0 on success, or %EBUSY on error. A warning
3118 * message is also printed on failure.
3119 *
3120 * The key difference that _exclusive makes is that userspace is
3121 * explicitly not allowed to map the resource via /dev/mem or
3122 * sysfs.
3123 */
3124 int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
3125 const char *res_name)
3126 {
3127 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
3128 }
3129 EXPORT_SYMBOL(pci_request_region_exclusive);
3130
3131 /**
3132 * pci_release_selected_regions - Release selected PCI I/O and memory resources
3133 * @pdev: PCI device whose resources were previously reserved
3134 * @bars: Bitmask of BARs to be released
3135 *
3136 * Release selected PCI I/O and memory resources previously reserved.
3137 * Call this function only after all use of the PCI regions has ceased.
3138 */
3139 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3140 {
3141 int i;
3142
3143 for (i = 0; i < 6; i++)
3144 if (bars & (1 << i))
3145 pci_release_region(pdev, i);
3146 }
3147 EXPORT_SYMBOL(pci_release_selected_regions);
3148
3149 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3150 const char *res_name, int excl)
3151 {
3152 int i;
3153
3154 for (i = 0; i < 6; i++)
3155 if (bars & (1 << i))
3156 if (__pci_request_region(pdev, i, res_name, excl))
3157 goto err_out;
3158 return 0;
3159
3160 err_out:
3161 while (--i >= 0)
3162 if (bars & (1 << i))
3163 pci_release_region(pdev, i);
3164
3165 return -EBUSY;
3166 }
3167
3169 /**
3170 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3171 * @pdev: PCI device whose resources are to be reserved
3172 * @bars: Bitmask of BARs to be requested
3173 * @res_name: Name to be associated with resource
3174 */
3175 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3176 const char *res_name)
3177 {
3178 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3179 }
3180 EXPORT_SYMBOL(pci_request_selected_regions);
3181
3182 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3183 const char *res_name)
3184 {
3185 return __pci_request_selected_regions(pdev, bars, res_name,
3186 IORESOURCE_EXCLUSIVE);
3187 }
3188 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3189
3190 /**
3191 * pci_release_regions - Release reserved PCI I/O and memory resources
3192 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
3193 *
3194 * Releases all PCI I/O and memory resources previously reserved by a
3195 * successful call to pci_request_regions. Call this function only
3196 * after all use of the PCI regions has ceased.
3197 */
3199 void pci_release_regions(struct pci_dev *pdev)
3200 {
3201 pci_release_selected_regions(pdev, (1 << 6) - 1);
3202 }
3203 EXPORT_SYMBOL(pci_release_regions);
3204
3205 /**
3206 * pci_request_regions - Reserve PCI I/O and memory resources
3207 * @pdev: PCI device whose resources are to be reserved
3208 * @res_name: Name to be associated with resource.
3209 *
3210 * Mark all PCI regions associated with PCI device @pdev as
3211 * being reserved by owner @res_name. Do not access any
3212 * address inside the PCI regions unless this call returns
3213 * successfully.
3214 *
3215 * Returns 0 on success, or %EBUSY on error. A warning
3216 * message is also printed on failure.
3217 */
3218 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3219 {
3220 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
3221 }
3222 EXPORT_SYMBOL(pci_request_regions);
3223
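/*
 * Illustrative sketch (not part of this file): claiming all BARs at
 * once and then mapping one of them; "foo" and BAR 0 are assumptions
 * for the example:
 *
 *    rc = pci_request_regions(pdev, "foo");
 *    if (rc)
 *        return rc;
 *
 *    regs = pci_iomap(pdev, 0, 0);
 *    if (!regs) {
 *        pci_release_regions(pdev);
 *        return -ENOMEM;
 *    }
 */
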
3224 /**
3225 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3226 * @pdev: PCI device whose resources are to be reserved
3227 * @res_name: Name to be associated with resource.
3228 *
3229 * Mark all PCI regions associated with PCI device @pdev as
3230 * being reserved by owner @res_name. Do not access any
3231 * address inside the PCI regions unless this call returns
3232 * successfully.
3233 *
3234 * pci_request_regions_exclusive() will mark the region so that
3235 * /dev/mem and the sysfs MMIO access will not be allowed.
3236 *
3237 * Returns 0 on success, or %EBUSY on error. A warning
3238 * message is also printed on failure.
3239 */
3240 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3241 {
3242 return pci_request_selected_regions_exclusive(pdev,
3243 ((1 << 6) - 1), res_name);
3244 }
3245 EXPORT_SYMBOL(pci_request_regions_exclusive);
3246
3247 #ifdef PCI_IOBASE
3248 struct io_range {
3249 struct list_head list;
3250 phys_addr_t start;
3251 resource_size_t size;
3252 };
3253
3254 static LIST_HEAD(io_range_list);
3255 static DEFINE_SPINLOCK(io_range_lock);
3256 #endif
3257
3258 /*
3259 * Record the PCI IO range (expressed as CPU physical address + size).
3260 * Return a negative value if an error has occurred, zero otherwise.
3261 */
3262 int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
3263 {
3264 int err = 0;
3265
3266 #ifdef PCI_IOBASE
3267 struct io_range *range;
3268 resource_size_t allocated_size = 0;
3269
3270 /* check if the range hasn't been previously recorded */
3271 spin_lock(&io_range_lock);
3272 list_for_each_entry(range, &io_range_list, list) {
3273 if (addr >= range->start && addr + size <= range->start + range->size) {
3274 /* range already registered, bail out */
3275 goto end_register;
3276 }
3277 allocated_size += range->size;
3278 }
3279
3280 /* range not registered yet, check for available space */
3281 if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
3282 /* if it's too big check if 64K space can be reserved */
3283 if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
3284 err = -E2BIG;
3285 goto end_register;
3286 }
3287
3288 size = SZ_64K;
3289 pr_warn("Requested IO range too big, new size set to 64K\n");
3290 }
3291
3292 /* add the range to the list */
3293 range = kzalloc(sizeof(*range), GFP_ATOMIC);
3294 if (!range) {
3295 err = -ENOMEM;
3296 goto end_register;
3297 }
3298
3299 range->start = addr;
3300 range->size = size;
3301
3302 list_add_tail(&range->list, &io_range_list);
3303
3304 end_register:
3305 spin_unlock(&io_range_lock);
3306 #endif
3307
3308 return err;
3309 }
3310
3311 phys_addr_t pci_pio_to_address(unsigned long pio)
3312 {
3313 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3314
3315 #ifdef PCI_IOBASE
3316 struct io_range *range;
3317 resource_size_t allocated_size = 0;
3318
3319 if (pio > IO_SPACE_LIMIT)
3320 return address;
3321
3322 spin_lock(&io_range_lock);
3323 list_for_each_entry(range, &io_range_list, list) {
3324 if (pio >= allocated_size && pio < allocated_size + range->size) {
3325 address = range->start + pio - allocated_size;
3326 break;
3327 }
3328 allocated_size += range->size;
3329 }
3330 spin_unlock(&io_range_lock);
3331 #endif
3332
3333 return address;
3334 }
3335
3336 unsigned long __weak pci_address_to_pio(phys_addr_t address)
3337 {
3338 #ifdef PCI_IOBASE
3339 struct io_range *res;
3340 resource_size_t offset = 0;
3341 unsigned long addr = -1;
3342
3343 spin_lock(&io_range_lock);
3344 list_for_each_entry(res, &io_range_list, list) {
3345 if (address >= res->start && address < res->start + res->size) {
3346 addr = address - res->start + offset;
3347 break;
3348 }
3349 offset += res->size;
3350 }
3351 spin_unlock(&io_range_lock);
3352
3353 return addr;
3354 #else
3355 if (address > IO_SPACE_LIMIT)
3356 return (unsigned long)-1;
3357
3358 return (unsigned long) address;
3359 #endif
3360 }
3361
3362 /**
3363 * pci_remap_iospace - Remap the memory mapped I/O space
3364 * @res: Resource describing the I/O space
3365 * @phys_addr: physical address of range to be mapped
3366 *
3367 * Remap the memory mapped I/O space described by @res
3368 * and the CPU physical address @phys_addr into virtual address space.
3369 * Only architectures that have memory mapped IO functions defined
3370 * (and the PCI_IOBASE value defined) should call this function.
3371 */
3372 int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
3373 {
3374 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3375 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3376
3377 if (!(res->flags & IORESOURCE_IO))
3378 return -EINVAL;
3379
3380 if (res->end > IO_SPACE_LIMIT)
3381 return -EINVAL;
3382
3383 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3384 pgprot_device(PAGE_KERNEL));
3385 #else
3386 /* This architecture does not have memory mapped I/O space,
3387 * so this function should never be called. */
3388 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3389 return -ENODEV;
3390 #endif
3391 }
3392
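/*
 * Illustrative sketch (not part of this file): a host bridge driver
 * with an I/O window "res" (IORESOURCE_IO) and its CPU physical base
 * "phys" would map the window like this:
 *
 *    err = pci_remap_iospace(&res, phys);
 *    if (err)
 *        dev_warn(dev, "error %d mapping IO space\n", err);
 */
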
3393 /**
3394 * pci_unmap_iospace - Unmap the memory mapped I/O space
3395 * @res: resource to be unmapped
3396 *
3397 * Unmap the CPU virtual address @res from virtual address space.
3398 * Only architectures that have memory mapped IO functions defined
3399 * (and the PCI_IOBASE value defined) should call this function.
3400 */
3401 void pci_unmap_iospace(struct resource *res)
3402 {
3403 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3404 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3405
3406 unmap_kernel_range(vaddr, resource_size(res));
3407 #endif
3408 }
3409
3410 static void __pci_set_master(struct pci_dev *dev, bool enable)
3411 {
3412 u16 old_cmd, cmd;
3413
3414 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
3415 if (enable)
3416 cmd = old_cmd | PCI_COMMAND_MASTER;
3417 else
3418 cmd = old_cmd & ~PCI_COMMAND_MASTER;
3419 if (cmd != old_cmd) {
3420 dev_dbg(&dev->dev, "%s bus mastering\n",
3421 enable ? "enabling" : "disabling");
3422 pci_write_config_word(dev, PCI_COMMAND, cmd);
3423 }
3424 dev->is_busmaster = enable;
3425 }
3426
3427 /**
3428 * pcibios_setup - process "pci=" kernel boot arguments
3429 * @str: string used to pass in "pci=" kernel boot arguments
3430 *
3431 * Process kernel boot arguments. This is the default implementation.
3432 * Architecture specific implementations can override this as necessary.
3433 */
3434 char * __weak __init pcibios_setup(char *str)
3435 {
3436 return str;
3437 }
3438
3439 /**
3440 * pcibios_set_master - enable PCI bus-mastering for device dev
3441 * @dev: the PCI device to enable
3442 *
3443 * Enables PCI bus-mastering for the device. This is the default
3444 * implementation. Architecture specific implementations can override
3445 * this if necessary.
3446 */
3447 void __weak pcibios_set_master(struct pci_dev *dev)
3448 {
3449 u8 lat;
3450
3451 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
3452 if (pci_is_pcie(dev))
3453 return;
3454
3455 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
3456 if (lat < 16)
3457 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
3458 else if (lat > pcibios_max_latency)
3459 lat = pcibios_max_latency;
3460 else
3461 return;
3462
3463 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
3464 }
3465
3466 /**
3467 * pci_set_master - enables bus-mastering for device dev
3468 * @dev: the PCI device to enable
3469 *
3470 * Enables bus-mastering on the device and calls pcibios_set_master()
3471 * to do the needed arch specific settings.
3472 */
3473 void pci_set_master(struct pci_dev *dev)
3474 {
3475 __pci_set_master(dev, true);
3476 pcibios_set_master(dev);
3477 }
3478 EXPORT_SYMBOL(pci_set_master);
3479
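/*
 * Ordering note with an illustrative fragment (not part of this file):
 * bus mastering must be enabled before the device may issue DMA (or
 * MSI, which is an inbound memory write), so drivers typically call:
 *
 *    pci_set_master(pdev);    after pci_enable_device()
 *
 * and only then set up DMA rings and request IRQs.
 */
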
3480 /**
3481 * pci_clear_master - disables bus-mastering for device dev
3482 * @dev: the PCI device to disable
3483 */
3484 void pci_clear_master(struct pci_dev *dev)
3485 {
3486 __pci_set_master(dev, false);
3487 }
3488 EXPORT_SYMBOL(pci_clear_master);
3489
3490 /**
3491 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
3492 * @dev: the PCI device for which MWI is to be enabled
3493 *
3494 * Helper function for pci_set_mwi.
3495 * Originally copied from drivers/net/acenic.c.
3496 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
3497 *
3498 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3499 */
3500 int pci_set_cacheline_size(struct pci_dev *dev)
3501 {
3502 u8 cacheline_size;
3503
3504 if (!pci_cache_line_size)
3505 return -EINVAL;
3506
3507 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
3508 equal to or a multiple of the right value. */
3509 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3510 if (cacheline_size >= pci_cache_line_size &&
3511 (cacheline_size % pci_cache_line_size) == 0)
3512 return 0;
3513
3514 /* Write the correct value. */
3515 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
3516 /* Read it back. */
3517 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3518 if (cacheline_size == pci_cache_line_size)
3519 return 0;
3520
3521 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n",
3522 pci_cache_line_size << 2);
3523
3524 return -EINVAL;
3525 }
3526 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
3527
3528 /**
3529 * pci_set_mwi - enables memory-write-invalidate PCI transaction
3530 * @dev: the PCI device for which MWI is enabled
3531 *
3532 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
3533 *
3534 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3535 */
3536 int pci_set_mwi(struct pci_dev *dev)
3537 {
3538 #ifdef PCI_DISABLE_MWI
3539 return 0;
3540 #else
3541 int rc;
3542 u16 cmd;
3543
3544 rc = pci_set_cacheline_size(dev);
3545 if (rc)
3546 return rc;
3547
3548 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3549 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
3550 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
3551 cmd |= PCI_COMMAND_INVALIDATE;
3552 pci_write_config_word(dev, PCI_COMMAND, cmd);
3553 }
3554 return 0;
3555 #endif
3556 }
3557 EXPORT_SYMBOL(pci_set_mwi);
3558
3559 /**
3560 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
3561 * @dev: the PCI device for which MWI is enabled
3562 *
3563 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
3564 * Callers are not required to check the return value.
3565 *
3566 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3567 */
3568 int pci_try_set_mwi(struct pci_dev *dev)
3569 {
3570 #ifdef PCI_DISABLE_MWI
3571 return 0;
3572 #else
3573 return pci_set_mwi(dev);
3574 #endif
3575 }
3576 EXPORT_SYMBOL(pci_try_set_mwi);
3577
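/*
 * Usage sketch (assumption: an ordinary driver, not this file): MWI is a
 * conventional-PCI performance hint, so callers normally use the _try_
 * variant and ignore the result:
 *
 *	pci_try_set_mwi(pdev);	// best effort; failure is harmless
 */
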
3578 /**
3579 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
3580 * @dev: the PCI device to disable
3581 *
3582 * Disables PCI Memory-Write-Invalidate transaction on the device
3583 */
3584 void pci_clear_mwi(struct pci_dev *dev)
3585 {
3586 #ifndef PCI_DISABLE_MWI
3587 u16 cmd;
3588
3589 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3590 if (cmd & PCI_COMMAND_INVALIDATE) {
3591 cmd &= ~PCI_COMMAND_INVALIDATE;
3592 pci_write_config_word(dev, PCI_COMMAND, cmd);
3593 }
3594 #endif
3595 }
3596 EXPORT_SYMBOL(pci_clear_mwi);
3597
3598 /**
3599 * pci_intx - enables/disables PCI INTx for device dev
3600 * @pdev: the PCI device to operate on
3601 * @enable: boolean: whether to enable or disable PCI INTx
3602 *
3603 * Enables/disables PCI INTx for device dev
3604 */
3605 void pci_intx(struct pci_dev *pdev, int enable)
3606 {
3607 u16 pci_command, new;
3608
3609 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3610
3611 if (enable)
3612 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
3613 else
3614 new = pci_command | PCI_COMMAND_INTX_DISABLE;
3615
3616 if (new != pci_command) {
3617 struct pci_devres *dr;
3618
3619 pci_write_config_word(pdev, PCI_COMMAND, new);
3620
3621 dr = find_pci_dr(pdev);
3622 if (dr && !dr->restore_intx) {
3623 dr->restore_intx = 1;
3624 dr->orig_intx = !enable;
3625 }
3626 }
3627 }
3628 EXPORT_SYMBOL_GPL(pci_intx);
3629
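/*
 * Usage sketch (hypothetical driver): a device running on MSI can keep
 * the legacy line masked and restore it when falling back to INTx:
 *
 *	pci_intx(pdev, 0);	// mask INTx while MSI is in use
 *	...
 *	pci_intx(pdev, 1);	// unmask again for legacy interrupts
 */
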
3630 /**
3631 * pci_intx_mask_supported - probe for INTx masking support
3632 * @dev: the PCI device to operate on
3633 *
3634 * Check if the device dev supports INTx masking via the config space
3635 * command word.
3636 */
3637 bool pci_intx_mask_supported(struct pci_dev *dev)
3638 {
3639 bool mask_supported = false;
3640 u16 orig, new;
3641
3642 if (dev->broken_intx_masking)
3643 return false;
3644
3645 pci_cfg_access_lock(dev);
3646
3647 pci_read_config_word(dev, PCI_COMMAND, &orig);
3648 pci_write_config_word(dev, PCI_COMMAND,
3649 orig ^ PCI_COMMAND_INTX_DISABLE);
3650 pci_read_config_word(dev, PCI_COMMAND, &new);
3651
3652 /*
3653 * There's no way to protect against hardware bugs or detect them
3654 * reliably, but as long as we know what the value should be, let's
3655 * go ahead and check it.
3656 */
3657 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
3658 dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
3659 orig, new);
3660 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
3661 mask_supported = true;
3662 pci_write_config_word(dev, PCI_COMMAND, orig);
3663 }
3664
3665 pci_cfg_access_unlock(dev);
3666 return mask_supported;
3667 }
3668 EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
3669
3670 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
3671 {
3672 struct pci_bus *bus = dev->bus;
3673 bool mask_updated = true;
3674 u32 cmd_status_dword;
3675 u16 origcmd, newcmd;
3676 unsigned long flags;
3677 bool irq_pending;
3678
3679 /*
3680 * We do a single dword read to retrieve both command and status.
3681 * Document assumptions that make this possible.
3682 */
3683 BUILD_BUG_ON(PCI_COMMAND % 4);
3684 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
3685
3686 raw_spin_lock_irqsave(&pci_lock, flags);
3687
3688 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
3689
3690 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
3691
3692 /*
3693 * Check interrupt status register to see whether our device
3694 * triggered the interrupt (when masking) or the next IRQ is
3695 * already pending (when unmasking).
3696 */
3697 if (mask != irq_pending) {
3698 mask_updated = false;
3699 goto done;
3700 }
3701
3702 origcmd = cmd_status_dword;
3703 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3704 if (mask)
3705 newcmd |= PCI_COMMAND_INTX_DISABLE;
3706 if (newcmd != origcmd)
3707 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3708
3709 done:
3710 raw_spin_unlock_irqrestore(&pci_lock, flags);
3711
3712 return mask_updated;
3713 }
3714
3715 /**
3716 * pci_check_and_mask_intx - mask INTx on pending interrupt
3717 * @dev: the PCI device to operate on
3718 *
3719 * Check if the device dev has its INTx line asserted, mask it and
3720 * return true in that case. False is returned if no interrupt was
3721 * pending.
3722 */
3723 bool pci_check_and_mask_intx(struct pci_dev *dev)
3724 {
3725 return pci_check_and_set_intx_mask(dev, true);
3726 }
3727 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3728
3729 /**
3730 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
3731 * @dev: the PCI device to operate on
3732 *
3733 * Check if the device dev has its INTx line asserted, unmask it if not
3734 * and return true. False is returned and the mask remains active if
3735 * there was still an interrupt pending.
3736 */
3737 bool pci_check_and_unmask_intx(struct pci_dev *dev)
3738 {
3739 return pci_check_and_set_intx_mask(dev, false);
3740 }
3741 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
3742
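/*
 * Usage sketch (a hypothetical shared-IRQ handler, modeled on how a
 * passthrough driver might pair these helpers): mask in hard IRQ context
 * only when this device asserted the line, and unmask once the consumer
 * has serviced it:
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct pci_dev *pdev = data;
 *
 *		if (!pci_check_and_mask_intx(pdev))
 *			return IRQ_NONE;	// shared line, not ours
 *		// ... signal the consumer, which later calls
 *		// pci_check_and_unmask_intx() to re-arm the interrupt
 *		return IRQ_HANDLED;
 *	}
 */
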
3743 /**
3744 * pci_wait_for_pending_transaction - waits for pending transaction
3745 * @dev: the PCI device to operate on
3746 *
3747 * Return 0 if a transaction is still pending, 1 otherwise.
3748 */
3749 int pci_wait_for_pending_transaction(struct pci_dev *dev)
3750 {
3751 if (!pci_is_pcie(dev))
3752 return 1;
3753
3754 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
3755 PCI_EXP_DEVSTA_TRPND);
3756 }
3757 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3758
3759 static void pci_flr_wait(struct pci_dev *dev)
3760 {
3761 int delay = 1, timeout = 60000;
3762 u32 id;
3763
3764 /*
3765 * Per PCIe r3.1, sec 6.6.2, a device must complete an FLR within
3766 * 100ms, but may silently discard requests while the FLR is in
3767 * progress. Wait 100ms before trying to access the device.
3768 */
3769 msleep(100);
3770
3771 /*
3772 * After 100ms, the device should not silently discard config
3773 * requests, but it may still indicate that it needs more time by
3774 * responding to them with CRS completions. The Root Port will
3775 * generally synthesize ~0 data to complete the read (except when
3776 * CRS SV is enabled and the read was for the Vendor ID; in that
3777 * case it synthesizes 0x0001 data).
3778 *
3779 * Wait for the device to return a non-CRS completion. Read the
3780 * Command register instead of Vendor ID so we don't have to
3781 * contend with the CRS SV value.
3782 */
3783 pci_read_config_dword(dev, PCI_COMMAND, &id);
3784 while (id == ~0) {
3785 if (delay > timeout) {
3786 dev_warn(&dev->dev, "not ready %dms after FLR; giving up\n",
3787 100 + delay - 1);
3788 return;
3789 }
3790
3791 if (delay > 1000)
3792 dev_info(&dev->dev, "not ready %dms after FLR; waiting\n",
3793 100 + delay - 1);
3794
3795 msleep(delay);
3796 delay *= 2;
3797 pci_read_config_dword(dev, PCI_COMMAND, &id);
3798 }
3799
3800 if (delay > 1000)
3801 dev_info(&dev->dev, "ready %dms after FLR\n", 100 + delay - 1);
3802 }
3803
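/*
 * Note on the loop above (arithmetic derived from the code, not from the
 * spec): the sleep doubles 1, 2, 4, ... 32768 ms, so a device gets about
 * 100 + 65535 ms, roughly 65 seconds in total, before we give up on it.
 */
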
3804 static int pcie_flr(struct pci_dev *dev, int probe)
3805 {
3806 u32 cap;
3807
3808 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3809 if (!(cap & PCI_EXP_DEVCAP_FLR))
3810 return -ENOTTY;
3811
3812 if (probe)
3813 return 0;
3814
3815 if (!pci_wait_for_pending_transaction(dev))
3816 dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
3817
3818 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3819 pci_flr_wait(dev);
3820 return 0;
3821 }
3822
3823 static int pci_af_flr(struct pci_dev *dev, int probe)
3824 {
3825 int pos;
3826 u8 cap;
3827
3828 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3829 if (!pos)
3830 return -ENOTTY;
3831
3832 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3833 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3834 return -ENOTTY;
3835
3836 if (probe)
3837 return 0;
3838
3839 /*
3840 * Wait for Transaction Pending bit to clear. A word-aligned test
3841 * is used, so we use the control offset rather than status and shift
3842 * the test bit to match.
3843 */
3844 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
3845 PCI_AF_STATUS_TP << 8))
3846 dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
3847
3848 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3849 pci_flr_wait(dev);
3850 return 0;
3851 }
3852
3853 /**
3854 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3855 * @dev: Device to reset.
3856 * @probe: If set, only check if the device can be reset this way.
3857 *
3858 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3859 * unset, it will be reinitialized internally when going from PCI_D3hot to
3860 * PCI_D0. If that's the case and the device is not in a low-power state
3861 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3862 *
3863 * NOTE: This causes the caller to sleep for twice the device power transition
3864 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3865 * by default (i.e. unless the @dev's d3_delay field has a different value).
3866 * Moreover, only devices in D0 can be reset by this function.
3867 */
3868 static int pci_pm_reset(struct pci_dev *dev, int probe)
3869 {
3870 u16 csr;
3871
3872 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
3873 return -ENOTTY;
3874
3875 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3876 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3877 return -ENOTTY;
3878
3879 if (probe)
3880 return 0;
3881
3882 if (dev->current_state != PCI_D0)
3883 return -EINVAL;
3884
3885 csr &= ~PCI_PM_CTRL_STATE_MASK;
3886 csr |= PCI_D3hot;
3887 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3888 pci_dev_d3_sleep(dev);
3889
3890 csr &= ~PCI_PM_CTRL_STATE_MASK;
3891 csr |= PCI_D0;
3892 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3893 pci_dev_d3_sleep(dev);
3894
3895 return 0;
3896 }
3897
3898 void pci_reset_secondary_bus(struct pci_dev *dev)
3899 {
3900 u16 ctrl;
3901
3902 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
3903 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3904 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3905 /*
3906 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
3907 * this to 2ms to ensure that we meet the minimum requirement.
3908 */
3909 msleep(2);
3910
3911 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3912 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3913
3914 /*
3915 * Trhfa for conventional PCI is 2^25 clock cycles.
3916 * Assuming a minimum 33MHz clock this results in a 1s
3917 * delay before we can consider subordinate devices to
3918 * be re-initialized. PCIe has some ways to shorten this,
3919 * but we don't make use of them yet.
3920 */
3921 ssleep(1);
3922 }
3923
3924 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
3925 {
3926 pci_reset_secondary_bus(dev);
3927 }
3928
3929 /**
3930 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
3931 * @dev: Bridge device
3932 *
3933 * Use the bridge control register to assert reset on the secondary bus.
3934 * Devices on the secondary bus are left in power-on state.
3935 */
3936 void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
3937 {
3938 pcibios_reset_secondary_bus(dev);
3939 }
3940 EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
3941
3942 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3943 {
3944 struct pci_dev *pdev;
3945
3946 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
3947 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
3948 return -ENOTTY;
3949
3950 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3951 if (pdev != dev)
3952 return -ENOTTY;
3953
3954 if (probe)
3955 return 0;
3956
3957 pci_reset_bridge_secondary_bus(dev->bus->self);
3958
3959 return 0;
3960 }
3961
3962 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
3963 {
3964 int rc = -ENOTTY;
3965
3966 if (!hotplug || !try_module_get(hotplug->ops->owner))
3967 return rc;
3968
3969 if (hotplug->ops->reset_slot)
3970 rc = hotplug->ops->reset_slot(hotplug, probe);
3971
3972 module_put(hotplug->ops->owner);
3973
3974 return rc;
3975 }
3976
3977 static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
3978 {
3979 struct pci_dev *pdev;
3980
3981 if (dev->subordinate || !dev->slot ||
3982 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
3983 return -ENOTTY;
3984
3985 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3986 if (pdev != dev && pdev->slot == dev->slot)
3987 return -ENOTTY;
3988
3989 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
3990 }
3991
3992 static int __pci_dev_reset(struct pci_dev *dev, int probe)
3993 {
3994 int rc;
3995
3996 might_sleep();
3997
3998 rc = pci_dev_specific_reset(dev, probe);
3999 if (rc != -ENOTTY)
4000 goto done;
4001
4002 rc = pcie_flr(dev, probe);
4003 if (rc != -ENOTTY)
4004 goto done;
4005
4006 rc = pci_af_flr(dev, probe);
4007 if (rc != -ENOTTY)
4008 goto done;
4009
4010 rc = pci_pm_reset(dev, probe);
4011 if (rc != -ENOTTY)
4012 goto done;
4013
4014 rc = pci_dev_reset_slot_function(dev, probe);
4015 if (rc != -ENOTTY)
4016 goto done;
4017
4018 rc = pci_parent_bus_reset(dev, probe);
4019 done:
4020 return rc;
4021 }
4022
4023 static void pci_dev_lock(struct pci_dev *dev)
4024 {
4025 pci_cfg_access_lock(dev);
4026 /* block PM suspend, driver probe, etc. */
4027 device_lock(&dev->dev);
4028 }
4029
4030 /* Return 1 on successful lock, 0 on contention */
4031 static int pci_dev_trylock(struct pci_dev *dev)
4032 {
4033 if (pci_cfg_access_trylock(dev)) {
4034 if (device_trylock(&dev->dev))
4035 return 1;
4036 pci_cfg_access_unlock(dev);
4037 }
4038
4039 return 0;
4040 }
4041
4042 static void pci_dev_unlock(struct pci_dev *dev)
4043 {
4044 device_unlock(&dev->dev);
4045 pci_cfg_access_unlock(dev);
4046 }
4047
4048 /**
4049 * pci_reset_notify - notify device driver of reset
4050 * @dev: device to be notified of reset
4051 * @prepare: 'true' if device is about to be reset; 'false' if reset attempt
4052 * completed
4053 *
4054 * Must be called prior to device access being disabled and after device
4055 * access is restored.
4056 */
4057 static void pci_reset_notify(struct pci_dev *dev, bool prepare)
4058 {
4059 const struct pci_error_handlers *err_handler =
4060 dev->driver ? dev->driver->err_handler : NULL;
4061 if (err_handler && err_handler->reset_notify)
4062 err_handler->reset_notify(dev, prepare);
4063 }
4064
4065 static void pci_dev_save_and_disable(struct pci_dev *dev)
4066 {
4067 pci_reset_notify(dev, true);
4068
4069 /*
4070 * Wake up the device prior to save. PM registers default to D0 after
4071 * reset and a simple register restore doesn't reliably return
4072 * to a non-D0 state anyway.
4073 */
4074 pci_set_power_state(dev, PCI_D0);
4075
4076 pci_save_state(dev);
4077 /*
4078 * Disable the device by clearing the Command register, except for
4079 * INTx-disable which is set. This not only disables MMIO and I/O port
4080 * BARs, but also prevents the device from being Bus Master, preventing
4081 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
4082 * compliant devices, INTx-disable prevents legacy interrupts.
4083 */
4084 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4085 }
4086
4087 static void pci_dev_restore(struct pci_dev *dev)
4088 {
4089 pci_restore_state(dev);
4090 pci_reset_notify(dev, false);
4091 }
4092
4093 static int pci_dev_reset(struct pci_dev *dev, int probe)
4094 {
4095 int rc;
4096
4097 if (!probe)
4098 pci_dev_lock(dev);
4099
4100 rc = __pci_dev_reset(dev, probe);
4101
4102 if (!probe)
4103 pci_dev_unlock(dev);
4104
4105 return rc;
4106 }
4107
4108 /**
4109 * __pci_reset_function - reset a PCI device function
4110 * @dev: PCI device to reset
4111 *
4112 * Some devices allow an individual function to be reset without affecting
4113 * other functions in the same device. The PCI device must be responsive
4114 * to PCI config space in order to use this function.
4115 *
4116 * The device function is presumed to be unused when this function is called.
4117 * Resetting the device will make the contents of PCI configuration space
4118 * random, so any caller of this must be prepared to reinitialise the
4119 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
4120 * etc.
4121 *
4122 * Returns 0 if the device function was successfully reset or negative if the
4123 * device doesn't support resetting a single function.
4124 */
4125 int __pci_reset_function(struct pci_dev *dev)
4126 {
4127 return pci_dev_reset(dev, 0);
4128 }
4129 EXPORT_SYMBOL_GPL(__pci_reset_function);
4130
4131 /**
4132 * __pci_reset_function_locked - reset a PCI device function while holding
4133 * the @dev mutex lock.
4134 * @dev: PCI device to reset
4135 *
4136 * Some devices allow an individual function to be reset without affecting
4137 * other functions in the same device. The PCI device must be responsive
4138 * to PCI config space in order to use this function.
4139 *
4140 * The device function is presumed to be unused and the caller is holding
4141 * the device mutex lock when this function is called.
4142 * Resetting the device will make the contents of PCI configuration space
4143 * random, so any caller of this must be prepared to reinitialise the
4144 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
4145 * etc.
4146 *
4147 * Returns 0 if the device function was successfully reset or negative if the
4148 * device doesn't support resetting a single function.
4149 */
4150 int __pci_reset_function_locked(struct pci_dev *dev)
4151 {
4152 return __pci_dev_reset(dev, 0);
4153 }
4154 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
4155
4156 /**
4157 * pci_probe_reset_function - check whether the device can be safely reset
4158 * @dev: PCI device to reset
4159 *
4160 * Some devices allow an individual function to be reset without affecting
4161 * other functions in the same device. The PCI device must be responsive
4162 * to PCI config space in order to use this function.
4163 *
4164 * Returns 0 if the device function can be reset or negative if the
4165 * device doesn't support resetting a single function.
4166 */
4167 int pci_probe_reset_function(struct pci_dev *dev)
4168 {
4169 return pci_dev_reset(dev, 1);
4170 }
4171
4172 /**
4173 * pci_reset_function - quiesce and reset a PCI device function
4174 * @dev: PCI device to reset
4175 *
4176 * Some devices allow an individual function to be reset without affecting
4177 * other functions in the same device. The PCI device must be responsive
4178 * to PCI config space in order to use this function.
4179 *
4180 * This function does not just reset the PCI portion of a device, but
4181 * clears all the state associated with the device. This function differs
4182 * from __pci_reset_function in that it saves and restores device state
4183 * over the reset.
4184 *
4185 * Returns 0 if the device function was successfully reset or negative if the
4186 * device doesn't support resetting a single function.
4187 */
4188 int pci_reset_function(struct pci_dev *dev)
4189 {
4190 int rc;
4191
4192 rc = pci_dev_reset(dev, 1);
4193 if (rc)
4194 return rc;
4195
4196 pci_dev_save_and_disable(dev);
4197
4198 rc = pci_dev_reset(dev, 0);
4199
4200 pci_dev_restore(dev);
4201
4202 return rc;
4203 }
4204 EXPORT_SYMBOL_GPL(pci_reset_function);
4205
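/*
 * Usage sketch (hypothetical caller, e.g. code handing a quiesced device
 * to a new owner): one call probes for a reset method, quiesces the
 * device, resets it and restores config space:
 *
 *	if (pci_reset_function(pdev))
 *		dev_warn(&pdev->dev, "no usable reset method\n");
 */
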
4206 /**
4207 * pci_try_reset_function - quiesce and reset a PCI device function
4208 * @dev: PCI device to reset
4209 *
4210 * Same as above, except return -EAGAIN if unable to lock device.
4211 */
4212 int pci_try_reset_function(struct pci_dev *dev)
4213 {
4214 int rc;
4215
4216 rc = pci_dev_reset(dev, 1);
4217 if (rc)
4218 return rc;
4219
4220 pci_dev_save_and_disable(dev);
4221
4222 if (pci_dev_trylock(dev)) {
4223 rc = __pci_dev_reset(dev, 0);
4224 pci_dev_unlock(dev);
4225 } else
4226 rc = -EAGAIN;
4227
4228 pci_dev_restore(dev);
4229
4230 return rc;
4231 }
4232 EXPORT_SYMBOL_GPL(pci_try_reset_function);
4233
4234 /* Do any devices on or below this bus prevent a bus reset? */
4235 static bool pci_bus_resetable(struct pci_bus *bus)
4236 {
4237 struct pci_dev *dev;
4238
4239
4240 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
4241 return false;
4242
4243 list_for_each_entry(dev, &bus->devices, bus_list) {
4244 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4245 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4246 return false;
4247 }
4248
4249 return true;
4250 }
4251
4252 /* Lock devices from the top of the tree down */
4253 static void pci_bus_lock(struct pci_bus *bus)
4254 {
4255 struct pci_dev *dev;
4256
4257 list_for_each_entry(dev, &bus->devices, bus_list) {
4258 pci_dev_lock(dev);
4259 if (dev->subordinate)
4260 pci_bus_lock(dev->subordinate);
4261 }
4262 }
4263
4264 /* Unlock devices from the bottom of the tree up */
4265 static void pci_bus_unlock(struct pci_bus *bus)
4266 {
4267 struct pci_dev *dev;
4268
4269 list_for_each_entry(dev, &bus->devices, bus_list) {
4270 if (dev->subordinate)
4271 pci_bus_unlock(dev->subordinate);
4272 pci_dev_unlock(dev);
4273 }
4274 }
4275
4276 /* Return 1 on successful lock, 0 on contention */
4277 static int pci_bus_trylock(struct pci_bus *bus)
4278 {
4279 struct pci_dev *dev;
4280
4281 list_for_each_entry(dev, &bus->devices, bus_list) {
4282 if (!pci_dev_trylock(dev))
4283 goto unlock;
4284 if (dev->subordinate) {
4285 if (!pci_bus_trylock(dev->subordinate)) {
4286 pci_dev_unlock(dev);
4287 goto unlock;
4288 }
4289 }
4290 }
4291 return 1;
4292
4293 unlock:
4294 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
4295 if (dev->subordinate)
4296 pci_bus_unlock(dev->subordinate);
4297 pci_dev_unlock(dev);
4298 }
4299 return 0;
4300 }
4301
4302 /* Do any devices on or below this slot prevent a bus reset? */
4303 static bool pci_slot_resetable(struct pci_slot *slot)
4304 {
4305 struct pci_dev *dev;
4306
4307 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4308 if (!dev->slot || dev->slot != slot)
4309 continue;
4310 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4311 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4312 return false;
4313 }
4314
4315 return true;
4316 }
4317
4318 /* Lock devices from the top of the tree down */
4319 static void pci_slot_lock(struct pci_slot *slot)
4320 {
4321 struct pci_dev *dev;
4322
4323 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4324 if (!dev->slot || dev->slot != slot)
4325 continue;
4326 pci_dev_lock(dev);
4327 if (dev->subordinate)
4328 pci_bus_lock(dev->subordinate);
4329 }
4330 }
4331
4332 /* Unlock devices from the bottom of the tree up */
4333 static void pci_slot_unlock(struct pci_slot *slot)
4334 {
4335 struct pci_dev *dev;
4336
4337 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4338 if (!dev->slot || dev->slot != slot)
4339 continue;
4340 if (dev->subordinate)
4341 pci_bus_unlock(dev->subordinate);
4342 pci_dev_unlock(dev);
4343 }
4344 }
4345
4346 /* Return 1 on successful lock, 0 on contention */
4347 static int pci_slot_trylock(struct pci_slot *slot)
4348 {
4349 struct pci_dev *dev;
4350
4351 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4352 if (!dev->slot || dev->slot != slot)
4353 continue;
4354 if (!pci_dev_trylock(dev))
4355 goto unlock;
4356 if (dev->subordinate) {
4357 if (!pci_bus_trylock(dev->subordinate)) {
4358 pci_dev_unlock(dev);
4359 goto unlock;
4360 }
4361 }
4362 }
4363 return 1;
4364
4365 unlock:
4366 list_for_each_entry_continue_reverse(dev,
4367 &slot->bus->devices, bus_list) {
4368 if (!dev->slot || dev->slot != slot)
4369 continue;
4370 if (dev->subordinate)
4371 pci_bus_unlock(dev->subordinate);
4372 pci_dev_unlock(dev);
4373 }
4374 return 0;
4375 }
4376
4377 /* Save and disable devices from the top of the tree down */
4378 static void pci_bus_save_and_disable(struct pci_bus *bus)
4379 {
4380 struct pci_dev *dev;
4381
4382 list_for_each_entry(dev, &bus->devices, bus_list) {
4383 pci_dev_save_and_disable(dev);
4384 if (dev->subordinate)
4385 pci_bus_save_and_disable(dev->subordinate);
4386 }
4387 }
4388
4389 /*
4390 * Restore devices from top of the tree down - parent bridges need to be
4391 * restored before we can get to subordinate devices.
4392 */
4393 static void pci_bus_restore(struct pci_bus *bus)
4394 {
4395 struct pci_dev *dev;
4396
4397 list_for_each_entry(dev, &bus->devices, bus_list) {
4398 pci_dev_restore(dev);
4399 if (dev->subordinate)
4400 pci_bus_restore(dev->subordinate);
4401 }
4402 }
4403
4404 /* Save and disable devices from the top of the tree down */
4405 static void pci_slot_save_and_disable(struct pci_slot *slot)
4406 {
4407 struct pci_dev *dev;
4408
4409 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4410 if (!dev->slot || dev->slot != slot)
4411 continue;
4412 pci_dev_save_and_disable(dev);
4413 if (dev->subordinate)
4414 pci_bus_save_and_disable(dev->subordinate);
4415 }
4416 }
4417
4418 /*
4419 * Restore devices from top of the tree down - parent bridges need to be
4420 * restored before we can get to subordinate devices.
4421 */
4422 static void pci_slot_restore(struct pci_slot *slot)
4423 {
4424 struct pci_dev *dev;
4425
4426 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4427 if (!dev->slot || dev->slot != slot)
4428 continue;
4429 pci_dev_restore(dev);
4430 if (dev->subordinate)
4431 pci_bus_restore(dev->subordinate);
4432 }
4433 }
4434
4435 static int pci_slot_reset(struct pci_slot *slot, int probe)
4436 {
4437 int rc;
4438
4439 if (!slot || !pci_slot_resetable(slot))
4440 return -ENOTTY;
4441
4442 if (!probe)
4443 pci_slot_lock(slot);
4444
4445 might_sleep();
4446
4447 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
4448
4449 if (!probe)
4450 pci_slot_unlock(slot);
4451
4452 return rc;
4453 }
4454
4455 /**
4456 * pci_probe_reset_slot - probe whether a PCI slot can be reset
4457 * @slot: PCI slot to probe
4458 *
4459 * Return 0 if slot can be reset, negative if a slot reset is not supported.
4460 */
4461 int pci_probe_reset_slot(struct pci_slot *slot)
4462 {
4463 return pci_slot_reset(slot, 1);
4464 }
4465 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
4466
4467 /**
4468 * pci_reset_slot - reset a PCI slot
4469 * @slot: PCI slot to reset
4470 *
4471 * A PCI bus may host multiple slots, each slot may support a reset mechanism
4472 * independent of other slots. For instance, some slots may support slot power
4473 * control. In the case of a 1:1 bus to slot architecture, this function may
4474 * wrap the bus reset to avoid spurious slot-related events such as hotplug.
4475 * Generally a slot reset should be attempted before a bus reset. All of the
4476 * functions of the slot and any subordinate buses behind the slot are reset
4477 * through this function. PCI config space of all devices in the slot and
4478 * behind the slot is saved before and restored after reset.
4479 *
4480 * Return 0 on success, non-zero on error.
4481 */
4482 int pci_reset_slot(struct pci_slot *slot)
4483 {
4484 int rc;
4485
4486 rc = pci_slot_reset(slot, 1);
4487 if (rc)
4488 return rc;
4489
4490 pci_slot_save_and_disable(slot);
4491
4492 rc = pci_slot_reset(slot, 0);
4493
4494 pci_slot_restore(slot);
4495
4496 return rc;
4497 }
4498 EXPORT_SYMBOL_GPL(pci_reset_slot);
4499
4500 /**
4501 * pci_try_reset_slot - Try to reset a PCI slot
4502 * @slot: PCI slot to reset
4503 *
4504 * Same as above except return -EAGAIN if the slot cannot be locked
4505 */
4506 int pci_try_reset_slot(struct pci_slot *slot)
4507 {
4508 int rc;
4509
4510 rc = pci_slot_reset(slot, 1);
4511 if (rc)
4512 return rc;
4513
4514 pci_slot_save_and_disable(slot);
4515
4516 if (pci_slot_trylock(slot)) {
4517 might_sleep();
4518 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
4519 pci_slot_unlock(slot);
4520 } else
4521 rc = -EAGAIN;
4522
4523 pci_slot_restore(slot);
4524
4525 return rc;
4526 }
4527 EXPORT_SYMBOL_GPL(pci_try_reset_slot);
4528
4529 static int pci_bus_reset(struct pci_bus *bus, int probe)
4530 {
4531 if (!bus->self || !pci_bus_resetable(bus))
4532 return -ENOTTY;
4533
4534 if (probe)
4535 return 0;
4536
4537 pci_bus_lock(bus);
4538
4539 might_sleep();
4540
4541 pci_reset_bridge_secondary_bus(bus->self);
4542
4543 pci_bus_unlock(bus);
4544
4545 return 0;
4546 }
4547
4548 /**
4549 * pci_probe_reset_bus - probe whether a PCI bus can be reset
4550 * @bus: PCI bus to probe
4551 *
4552 * Return 0 if bus can be reset, negative if a bus reset is not supported.
4553 */
4554 int pci_probe_reset_bus(struct pci_bus *bus)
4555 {
4556 return pci_bus_reset(bus, 1);
4557 }
4558 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
4559
4560 /**
4561 * pci_reset_bus - reset a PCI bus
4562 * @bus: top level PCI bus to reset
4563 *
4564 * Do a bus reset on the given bus and any subordinate buses, saving
4565 * and restoring state of all devices.
4566 *
4567 * Return 0 on success, non-zero on error.
4568 */
4569 int pci_reset_bus(struct pci_bus *bus)
4570 {
4571 int rc;
4572
4573 rc = pci_bus_reset(bus, 1);
4574 if (rc)
4575 return rc;
4576
4577 pci_bus_save_and_disable(bus);
4578
4579 rc = pci_bus_reset(bus, 0);
4580
4581 pci_bus_restore(bus);
4582
4583 return rc;
4584 }
4585 EXPORT_SYMBOL_GPL(pci_reset_bus);
4586
4587 /**
4588 * pci_try_reset_bus - Try to reset a PCI bus
4589 * @bus: top level PCI bus to reset
4590 *
4591 * Same as above except return -EAGAIN if the bus cannot be locked
4592 */
4593 int pci_try_reset_bus(struct pci_bus *bus)
4594 {
4595 int rc;
4596
4597 rc = pci_bus_reset(bus, 1);
4598 if (rc)
4599 return rc;
4600
4601 pci_bus_save_and_disable(bus);
4602
4603 if (pci_bus_trylock(bus)) {
4604 might_sleep();
4605 pci_reset_bridge_secondary_bus(bus->self);
4606 pci_bus_unlock(bus);
4607 } else
4608 rc = -EAGAIN;
4609
4610 pci_bus_restore(bus);
4611
4612 return rc;
4613 }
4614 EXPORT_SYMBOL_GPL(pci_try_reset_bus);
4615
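/*
 * Usage sketch (hypothetical caller following the slot-before-bus advice
 * in the pci_reset_slot() comment): try the narrower slot reset first and
 * fall back to resetting the whole bus:
 *
 *	if (!pdev->slot || pci_try_reset_slot(pdev->slot))
 *		pci_try_reset_bus(pdev->bus);
 */
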
4616 /**
4617 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
4618 * @dev: PCI device to query
4619 *
4620 * Returns mmrbc: maximum designed memory read count in bytes
4621 * or appropriate error value.
4622 */
4623 int pcix_get_max_mmrbc(struct pci_dev *dev)
4624 {
4625 int cap;
4626 u32 stat;
4627
4628 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4629 if (!cap)
4630 return -EINVAL;
4631
4632 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4633 return -EINVAL;
4634
4635 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
4636 }
4637 EXPORT_SYMBOL(pcix_get_max_mmrbc);
4638
4639 /**
4640 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
4641 * @dev: PCI device to query
4642 *
4643 * Returns mmrbc: maximum memory read count in bytes
4644 * or appropriate error value.
4645 */
4646 int pcix_get_mmrbc(struct pci_dev *dev)
4647 {
4648 int cap;
4649 u16 cmd;
4650
4651 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4652 if (!cap)
4653 return -EINVAL;
4654
4655 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4656 return -EINVAL;
4657
4658 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
4659 }
4660 EXPORT_SYMBOL(pcix_get_mmrbc);
4661
4662 /**
4663 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
4664 * @dev: PCI device to query
4665 * @mmrbc: maximum memory read count in bytes
4666 * valid values are 512, 1024, 2048, 4096
4667 *
4668 * If possible sets maximum memory read byte count; some bridges have errata
4669 * that prevent this.
4670 */
4671 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
4672 {
4673 int cap;
4674 u32 stat, v, o;
4675 u16 cmd;
4676
4677 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
4678 return -EINVAL;
4679
4680 v = ffs(mmrbc) - 10;
4681
4682 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4683 if (!cap)
4684 return -EINVAL;
4685
4686 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4687 return -EINVAL;
4688
4689 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
4690 return -E2BIG;
4691
4692 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4693 return -EINVAL;
4694
4695 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
4696 if (o != v) {
4697 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
4698 return -EIO;
4699
4700 cmd &= ~PCI_X_CMD_MAX_READ;
4701 cmd |= v << 2;
4702 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
4703 return -EIO;
4704 }
4705 return 0;
4706 }
4707 EXPORT_SYMBOL(pcix_set_mmrbc);
4708
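/*
 * Usage sketch (hypothetical PCI-X driver): raise the read byte count,
 * but never beyond what the device was designed for:
 *
 *	int max = pcix_get_max_mmrbc(pdev);
 *
 *	if (max > 0)
 *		pcix_set_mmrbc(pdev, min(max, 2048));
 */
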
4709 /**
4710 * pcie_get_readrq - get PCI Express read request size
4711 * @dev: PCI device to query
4712 *
4713 * Returns maximum memory read request in bytes
4714 * or appropriate error value.
4715 */
4716 int pcie_get_readrq(struct pci_dev *dev)
4717 {
4718 u16 ctl;
4719
4720 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
4721
4722 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4723 }
4724 EXPORT_SYMBOL(pcie_get_readrq);
4725
4726 /**
4727 * pcie_set_readrq - set PCI Express maximum memory read request
4728 * @dev: PCI device to query
4729 * @rq: maximum memory read count in bytes
4730 * valid values are 128, 256, 512, 1024, 2048, 4096
4731 *
4732 * If possible sets maximum memory read request in bytes
4733 */
4734 int pcie_set_readrq(struct pci_dev *dev, int rq)
4735 {
4736 u16 v;
4737
4738 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
4739 return -EINVAL;
4740
4741 /*
4742 * If using the "performance" PCIe config, we clamp the
4743 * read rq size to the max packet size to prevent the
4744 * host bridge from generating requests larger than we can
4745 * cope with.
4746 */
4747 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
4748 int mps = pcie_get_mps(dev);
4749
4750 if (mps < rq)
4751 rq = mps;
4752 }
4753
4754 v = (ffs(rq) - 8) << 12;
4755
4756 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4757 PCI_EXP_DEVCTL_READRQ, v);
4758 }
4759 EXPORT_SYMBOL(pcie_set_readrq);
4760
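/*
 * Encoding note (follows from the code above): ffs(rq) - 8 maps the legal
 * sizes 128..4096 onto 0..5 in the 3-bit field at bits 14:12 of Device
 * Control; e.g. rq = 512 gives (10 - 8) << 12 = 0x2000.
 */
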
4761 /**
4762 * pcie_get_mps - get PCI Express maximum payload size
4763 * @dev: PCI device to query
4764 *
4765 * Returns maximum payload size in bytes
4766 */
4767 int pcie_get_mps(struct pci_dev *dev)
4768 {
4769 u16 ctl;
4770
4771 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
4772
4773 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4774 }
4775 EXPORT_SYMBOL(pcie_get_mps);
4776
4777 /**
4778 * pcie_set_mps - set PCI Express maximum payload size
4779 * @dev: PCI device to query
4780 * @mps: maximum payload size in bytes
4781 * valid values are 128, 256, 512, 1024, 2048, 4096
4782 *
4783 * If possible sets maximum payload size
4784 */
4785 int pcie_set_mps(struct pci_dev *dev, int mps)
4786 {
4787 u16 v;
4788
4789 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
4790 return -EINVAL;
4791
4792 v = ffs(mps) - 8;
4793 if (v > dev->pcie_mpss)
4794 return -EINVAL;
4795 v <<= 5;
4796
4797 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4798 PCI_EXP_DEVCTL_PAYLOAD, v);
4799 }
4800 EXPORT_SYMBOL(pcie_set_mps);
4801
4802 /**
4803 * pcie_get_minimum_link - determine minimum link settings of a PCI device
4804 * @dev: PCI device to query
4805 * @speed: storage for minimum speed
4806 * @width: storage for minimum width
4807 *
4808 * This function will walk up the PCI device chain and determine the minimum
4809 * link width and speed of the device.
4810 */
4811 int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
4812 enum pcie_link_width *width)
4813 {
4814 int ret;
4815
4816 *speed = PCI_SPEED_UNKNOWN;
4817 *width = PCIE_LNK_WIDTH_UNKNOWN;
4818
4819 while (dev) {
4820 u16 lnksta;
4821 enum pci_bus_speed next_speed;
4822 enum pcie_link_width next_width;
4823
4824 ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
4825 if (ret)
4826 return ret;
4827
4828 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
4829 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
4830 PCI_EXP_LNKSTA_NLW_SHIFT;
4831
4832 if (next_speed < *speed)
4833 *speed = next_speed;
4834
4835 if (next_width < *width)
4836 *width = next_width;
4837
4838 dev = dev->bus->self;
4839 }
4840
4841 return 0;
4842 }
4843 EXPORT_SYMBOL(pcie_get_minimum_link);
4844
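/*
 * Usage sketch (hypothetical high-bandwidth driver warning about a
 * constrained slot; the thresholds below are examples only):
 *
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *
 *	if (!pcie_get_minimum_link(pdev, &speed, &width) &&
 *	    (speed < PCIE_SPEED_8_0GT || width < PCIE_LNK_X8))
 *		dev_warn(&pdev->dev, "PCIe link may limit performance\n");
 */
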
4845 /**
4846 * pci_select_bars - Make BAR mask from the type of resource
4847 * @dev: the PCI device for which BAR mask is made
4848 * @flags: resource type mask to be selected
4849 *
4850 * This helper routine makes a BAR mask from the resource type.
4851 */
4852 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
4853 {
4854 int i, bars = 0;
4855 for (i = 0; i < PCI_NUM_RESOURCES; i++)
4856 if (pci_resource_flags(dev, i) & flags)
4857 bars |= (1 << i);
4858 return bars;
4859 }
4860 EXPORT_SYMBOL(pci_select_bars);
4861
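/*
 * Usage sketch (hypothetical driver claiming only its memory BARs; "foo"
 * is a placeholder name):
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int rc = pci_request_selected_regions(pdev, bars, "foo");
 */
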
4862 /* Some architectures require additional programming to enable VGA */
4863 static arch_set_vga_state_t arch_set_vga_state;
4864
4865 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
4866 {
4867 arch_set_vga_state = func; /* NULL disables */
4868 }
4869
4870 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
4871 unsigned int command_bits, u32 flags)
4872 {
4873 if (arch_set_vga_state)
4874 return arch_set_vga_state(dev, decode, command_bits,
4875 flags);
4876 return 0;
4877 }
4878
4879 /**
4880 * pci_set_vga_state - set VGA decode state on device and parents if requested
4881 * @dev: the PCI device
4882 * @decode: true = enable decoding, false = disable decoding
4883 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
4884 * @flags: traverse ancestors and change bridges
4885 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
4886 */
4887 int pci_set_vga_state(struct pci_dev *dev, bool decode,
4888 unsigned int command_bits, u32 flags)
4889 {
4890 struct pci_bus *bus;
4891 struct pci_dev *bridge;
4892 u16 cmd;
4893 int rc;
4894
4895 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
4896
4897 /* ARCH specific VGA enables */
4898 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
4899 if (rc)
4900 return rc;
4901
4902 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
4903 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4904 if (decode)
4905 cmd |= command_bits;
4906 else
4907 cmd &= ~command_bits;
4908 pci_write_config_word(dev, PCI_COMMAND, cmd);
4909 }
4910
4911 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
4912 return 0;
4913
4914 bus = dev->bus;
4915 while (bus) {
4916 bridge = bus->self;
4917 if (bridge) {
4918 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
4919 &cmd);
4920 if (decode)
4921 cmd |= PCI_BRIDGE_CTL_VGA;
4922 else
4923 cmd &= ~PCI_BRIDGE_CTL_VGA;
4924 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
4925 cmd);
4926 }
4927 bus = bus->parent;
4928 }
4929 return 0;
4930 }
4931
4932 /**
4933 * pci_add_dma_alias - Add a DMA devfn alias for a device
4934 * @dev: the PCI device for which alias is added
4935 * @devfn: alias slot and function
4936 *
4937 * This helper encodes 8-bit devfn as bit number in dma_alias_mask.
4938 * It should be called early, preferably as a PCI fixup header quirk.
4939 */
4940 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
4941 {
4942 if (!dev->dma_alias_mask)
4943 dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
4944 sizeof(long), GFP_KERNEL);
4945 if (!dev->dma_alias_mask) {
4946 dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n");
4947 return;
4948 }
4949
4950 set_bit(devfn, dev->dma_alias_mask);
4951 dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
4952 PCI_SLOT(devfn), PCI_FUNC(devfn));
4953 }
4954
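/*
 * Usage sketch (a hypothetical header fixup; the IDs are placeholders):
 * a multi-function device that issues all DMA as function 0 can be
 * described like this:
 *
 *	static void quirk_foo_dma_alias(struct pci_dev *dev)
 *	{
 *		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_foo_dma_alias);
 */
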
4955 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
4956 {
4957 return (dev1->dma_alias_mask &&
4958 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
4959 (dev2->dma_alias_mask &&
4960 test_bit(dev1->devfn, dev2->dma_alias_mask));
4961 }
4962
4963 bool pci_device_is_present(struct pci_dev *pdev)
4964 {
4965 u32 v;
4966
4967 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
4968 }
4969 EXPORT_SYMBOL_GPL(pci_device_is_present);
4970
4971 void pci_ignore_hotplug(struct pci_dev *dev)
4972 {
4973 struct pci_dev *bridge = dev->bus->self;
4974
4975 dev->ignore_hotplug = 1;
4976 /* Propagate the "ignore hotplug" setting to the parent bridge. */
4977 if (bridge)
4978 bridge->ignore_hotplug = 1;
4979 }
4980 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
4981
4982 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
4983 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
4984 static DEFINE_SPINLOCK(resource_alignment_lock);
4985
4986 /**
4987 * pci_specified_resource_alignment - get resource alignment specified by user.
4988 * @dev: the PCI device to check
4989 *
4990 * RETURNS: Resource alignment if it is specified.
4991 * Zero if it is not specified.
4992 */
4993 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
4994 {
4995 int seg, bus, slot, func, align_order, count;
4996 unsigned short vendor, device, subsystem_vendor, subsystem_device;
4997 resource_size_t align = 0;
4998 char *p;
4999
5000 spin_lock(&resource_alignment_lock);
5001 p = resource_alignment_param;
5002 if (!*p)
5003 goto out;
5004 if (pci_has_flag(PCI_PROBE_ONLY)) {
5005 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
5006 goto out;
5007 }
5008
5009 while (*p) {
5010 count = 0;
5011 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
5012 p[count] == '@') {
5013 p += count + 1;
5014 } else {
5015 align_order = -1;
5016 }
5017 if (strncmp(p, "pci:", 4) == 0) {
5018 /* PCI vendor/device (subvendor/subdevice) ids are specified */
5019 p += 4;
5020 if (sscanf(p, "%hx:%hx:%hx:%hx%n",
5021 &vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
5022 if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
5023 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
5024 p);
5025 break;
5026 }
5027 subsystem_vendor = subsystem_device = 0;
5028 }
5029 p += count;
5030 if ((!vendor || (vendor == dev->vendor)) &&
5031 (!device || (device == dev->device)) &&
5032 (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
5033 (!subsystem_device || (subsystem_device == dev->subsystem_device))) {
5034 if (align_order == -1)
5035 align = PAGE_SIZE;
5036 else
5037 align = 1 << align_order;
5038 /* Found */
5039 break;
5040 }
5041 } else {
5043 if (sscanf(p, "%x:%x:%x.%x%n",
5044 &seg, &bus, &slot, &func, &count) != 4) {
5045 seg = 0;
5046 if (sscanf(p, "%x:%x.%x%n",
5047 &bus, &slot, &func, &count) != 3) {
5048 /* Invalid format */
5049 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
5050 p);
5051 break;
5052 }
5053 }
5054 p += count;
5055 if (seg == pci_domain_nr(dev->bus) &&
5056 bus == dev->bus->number &&
5057 slot == PCI_SLOT(dev->devfn) &&
5058 func == PCI_FUNC(dev->devfn)) {
5059 if (align_order == -1)
5060 align = PAGE_SIZE;
5061 else
5062 align = 1 << align_order;
5063 /* Found */
5064 break;
5065 }
5066 }
5067 if (*p != ';' && *p != ',') {
5068 /* End of param or invalid format */
5069 break;
5070 }
5071 p++;
5072 }
5073 out:
5074 spin_unlock(&resource_alignment_lock);
5075 return align;
5076 }
5077
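/*
 * Parameter format accepted above (derived from the parser; the IDs are
 * examples only). Entries are separated by ';' or ',', each optionally
 * prefixed with an alignment order:
 *
 *	pci=resource_alignment=20@pci:8086:1234;00:02.0
 *
 * requests 2^20-byte (1 MiB) alignment for vendor 8086 device 1234 and
 * the default PAGE_SIZE alignment for the device at 00:02.0.
 */
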
5078 /*
5079 * This function disables memory decoding and releases memory resources
5080 * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
5081 * It also rounds up size to specified alignment.
5082 * Later on, the kernel will assign page-aligned memory resources back
5083 * to the device.
5084 */
5085 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
5086 {
5087 int i;
5088 struct resource *r;
5089 resource_size_t align, size;
5090 u16 command;
5091
5092 /*
5093 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
5094 * 3.4.1.11. Their resources are allocated from the space
5095 * described by the VF BARx register in the PF's SR-IOV capability.
5096 * We can't influence their alignment here.
5097 */
5098 if (dev->is_virtfn)
5099 return;
5100
5101 /* check if the specified PCI device is a target for reassignment */
5102 align = pci_specified_resource_alignment(dev);
5103 if (!align)
5104 return;
5105
5106 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
5107 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
5108 dev_warn(&dev->dev,
5109 "Can't reassign resources to host bridge.\n");
5110 return;
5111 }
5112
5113 dev_info(&dev->dev,
5114 "Disabling memory decoding and releasing memory resources.\n");
5115 pci_read_config_word(dev, PCI_COMMAND, &command);
5116 command &= ~PCI_COMMAND_MEMORY;
5117 pci_write_config_word(dev, PCI_COMMAND, command);
5118
5119 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
5120 r = &dev->resource[i];
5121 if (!(r->flags & IORESOURCE_MEM))
5122 continue;
5123 if (r->flags & IORESOURCE_PCI_FIXED) {
5124 dev_info(&dev->dev, "Ignoring requested alignment for BAR%d: %pR\n",
5125 i, r);
5126 continue;
5127 }
5128
5129 size = resource_size(r);
5130 if (size < align) {
5131 size = align;
5132 dev_info(&dev->dev,
5133 "Rounding up size of resource #%d to %#llx.\n",
5134 i, (unsigned long long)size);
5135 }
5136 r->flags |= IORESOURCE_UNSET;
5137 r->end = size - 1;
5138 r->start = 0;
5139 }
5140 /*
5141 * Need to disable the bridge's resource windows so the kernel can
5142 * reassign new resource windows later on.
5143 */
5144 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
5145 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
5146 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
5147 r = &dev->resource[i];
5148 if (!(r->flags & IORESOURCE_MEM))
5149 continue;
5150 r->flags |= IORESOURCE_UNSET;
5151 r->end = resource_size(r) - 1;
5152 r->start = 0;
5153 }
5154 pci_disable_bridge_window(dev);
5155 }
5156 }
5157
5158 static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
5159 {
5160 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
5161 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
5162 spin_lock(&resource_alignment_lock);
5163 strncpy(resource_alignment_param, buf, count);
5164 resource_alignment_param[count] = '\0';
5165 spin_unlock(&resource_alignment_lock);
5166 return count;
5167 }
5168
5169 static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
5170 {
5171 size_t count;
5172 spin_lock(&resource_alignment_lock);
5173 count = snprintf(buf, size, "%s", resource_alignment_param);
5174 spin_unlock(&resource_alignment_lock);
5175 return count;
5176 }
5177
5178 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
5179 {
5180 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
5181 }
5182
5183 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
5184 const char *buf, size_t count)
5185 {
5186 return pci_set_resource_alignment_param(buf, count);
5187 }
5188
5189 static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
5190 pci_resource_alignment_store);
5191
5192 static int __init pci_resource_alignment_sysfs_init(void)
5193 {
5194 return bus_create_file(&pci_bus_type,
5195 &bus_attr_resource_alignment);
5196 }
5197 late_initcall(pci_resource_alignment_sysfs_init);
5198
5199 static void pci_no_domains(void)
5200 {
5201 #ifdef CONFIG_PCI_DOMAINS
5202 pci_domains_supported = 0;
5203 #endif
5204 }
5205
5206 #ifdef CONFIG_PCI_DOMAINS
5207 static atomic_t __domain_nr = ATOMIC_INIT(-1);
5208
5209 int pci_get_new_domain_nr(void)
5210 {
5211 return atomic_inc_return(&__domain_nr);
5212 }
5213
5214 #ifdef CONFIG_PCI_DOMAINS_GENERIC
5215 static int of_pci_bus_find_domain_nr(struct device *parent)
5216 {
5217 static int use_dt_domains = -1;
5218 int domain = -1;
5219
5220 if (parent)
5221 domain = of_get_pci_domain_nr(parent->of_node);
5222 /*
5223 * Check DT domain and use_dt_domains values.
5224 *
5225 * If DT domain property is valid (domain >= 0) and
5226 * use_dt_domains != 0, the DT assignment is valid since this means
5227 * we have not previously allocated a domain number by using
5228 * pci_get_new_domain_nr(); we should also update use_dt_domains to
5229 * 1, to indicate that we have just assigned a domain number from
5230 * DT.
5231 *
5232 * If DT domain property value is not valid (ie domain < 0), and we
5233 * have not previously assigned a domain number from DT
5234 * (use_dt_domains != 1) we should assign a domain number by
5235 * using the:
5236 *
5237 * pci_get_new_domain_nr()
5238 *
5239 * API and update the use_dt_domains value to keep track of method we
5240 * are using to assign domain numbers (use_dt_domains = 0).
5241 *
5242 * All other combinations imply we have a platform that is trying
5243 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
5244 * which is a recipe for domain mishandling and it is prevented by
5245 * invalidating the domain value (domain = -1) and printing a
5246 * corresponding error.
5247 */
5248 if (domain >= 0 && use_dt_domains) {
5249 use_dt_domains = 1;
5250 } else if (domain < 0 && use_dt_domains != 1) {
5251 use_dt_domains = 0;
5252 domain = pci_get_new_domain_nr();
5253 } else {
5254 dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
5255 parent->of_node->full_name);
5256 domain = -1;
5257 }
5258
5259 return domain;
5260 }
5261
5262 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
5263 {
5264 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
5265 acpi_pci_bus_find_domain_nr(bus);
5266 }
5267 #endif
5268 #endif
5269
5270 /**
5271 * pci_ext_cfg_avail - can we access extended PCI config space?
5272 *
5273 * Returns 1 if we can access PCI extended config space (offsets
5274 * greater than 0xff). This is the default implementation. Architecture
5275 * implementations can override this.
5276 */
5277 int __weak pci_ext_cfg_avail(void)
5278 {
5279 return 1;
5280 }
5281
5282 void __weak pci_fixup_cardbus(struct pci_bus *bus)
5283 {
5284 }
5285 EXPORT_SYMBOL(pci_fixup_cardbus);
5286
5287 static int __init pci_setup(char *str)
5288 {
5289 while (str) {
5290 char *k = strchr(str, ',');
5291 if (k)
5292 *k++ = 0;
5293 if (*str && (str = pcibios_setup(str)) && *str) {
5294 if (!strcmp(str, "nomsi")) {
5295 pci_no_msi();
5296 } else if (!strcmp(str, "noaer")) {
5297 pci_no_aer();
5298 } else if (!strncmp(str, "realloc=", 8)) {
5299 pci_realloc_get_opt(str + 8);
5300 } else if (!strncmp(str, "realloc", 7)) {
5301 pci_realloc_get_opt("on");
5302 } else if (!strcmp(str, "nodomains")) {
5303 pci_no_domains();
5304 } else if (!strncmp(str, "noari", 5)) {
5305 pcie_ari_disabled = true;
5306 } else if (!strncmp(str, "cbiosize=", 9)) {
5307 pci_cardbus_io_size = memparse(str + 9, &str);
5308 } else if (!strncmp(str, "cbmemsize=", 10)) {
5309 pci_cardbus_mem_size = memparse(str + 10, &str);
5310 } else if (!strncmp(str, "resource_alignment=", 19)) {
5311 pci_set_resource_alignment_param(str + 19,
5312 strlen(str + 19));
5313 } else if (!strncmp(str, "ecrc=", 5)) {
5314 pcie_ecrc_get_policy(str + 5);
5315 } else if (!strncmp(str, "hpiosize=", 9)) {
5316 pci_hotplug_io_size = memparse(str + 9, &str);
5317 } else if (!strncmp(str, "hpmemsize=", 10)) {
5318 pci_hotplug_mem_size = memparse(str + 10, &str);
5319 } else if (!strncmp(str, "hpbussize=", 10)) {
5320 pci_hotplug_bus_size =
5321 simple_strtoul(str + 10, &str, 0);
5322 if (pci_hotplug_bus_size > 0xff)
5323 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
5324 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
5325 pcie_bus_config = PCIE_BUS_TUNE_OFF;
5326 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
5327 pcie_bus_config = PCIE_BUS_SAFE;
5328 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
5329 pcie_bus_config = PCIE_BUS_PERFORMANCE;
5330 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
5331 pcie_bus_config = PCIE_BUS_PEER2PEER;
5332 } else if (!strncmp(str, "pcie_scan_all", 13)) {
5333 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
5334 } else {
5335 printk(KERN_ERR "PCI: Unknown option `%s'\n",
5336 str);
5337 }
5338 }
5339 str = k;
5340 }
5341 return 0;
5342 }
5343 early_param("pci", pci_setup);
5344
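/*
 * Example command line handled above (options are comma-separated and
 * processed left to right):
 *
 *	pci=nomsi,hpmemsize=8M,pcie_bus_safe
 */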