// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3hot_delay;

	if (delay < pci_pm_d3hot_delay)
		delay = pci_pm_d3hot_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;


/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if the arch didn't set CLS explicitly and
 * not all PCI devices agree on the same value.  The arch can override
 * either the default or the actual value as it sees fit.  Note that
 * this is measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
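
/*
 * Example (hypothetical caller, not part of this file):
 *
 *	unsigned char max = pci_bus_max_busnr(pdev->bus);
 *
 * 'max' then covers pdev->bus and every child bus below it.
 */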

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
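
/*
 * Example (hypothetical caller, a sketch only), assuming 'pdev' is a
 * valid pci_dev; a negative return means the config read itself failed:
 *
 *	int status = pci_status_get_and_clear_errors(pdev);
 *	if (status < 0)
 *		return status;
 *	if (status & PCI_STATUS_DETECTED_PARITY)
 *		pci_info(pdev, "cleared detected parity error\n");
 */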

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
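
/*
 * Example (hypothetical driver probe, a sketch only): map BAR 0 and
 * unmap it again on teardown:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */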

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address, which
 * may change if new hardware is inserted, if motherboard firmware
 * changes, or if kernel parameters change. If the domain is left
 * unspecified, it is taken to be 0.  To be robust against bus
 * renumbering, a path of PCI device/function numbers may be used to
 * address the specific device instead.  The path for a device can be
 * determined with 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}
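
/*
 * Illustrative strings accepted by pci_dev_str_match() (values are
 * made up; real ones depend on the system):
 *
 *	0000:03:00.0		domain:bus:device.function address
 *	03:00.0			the same, domain defaulting to 0
 *	00:1c.0/00.0		devfn 00.0 below the bridge at 00:1c.0
 *	pci:8086:10d3		any device with vendor 8086, device 10d3
 */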
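
/*
 * Follow the conventional capability list: @pos is the config-space
 * offset of a "next capability" pointer to chase, and @ttl bounds the
 * number of hops so a malformed, looping capability chain cannot hang
 * the search.
 */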
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
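
/*
 * Example (hypothetical caller): probing for the PCI Express
 * capability, assuming a valid 'pdev':
 *
 *	int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
 *	if (!pos)
 *		return -ENODEV;
 */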

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
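
/*
 * Example (a sketch): capabilities that may occur several times, such
 * as the vendor-specific one, can be enumerated by feeding each result
 * back in as the new start position (handle_vsec() is hypothetical):
 *
 *	int pos = 0;
 *
 *	while ((pos = pci_find_next_ext_capability(dev, pos,
 *						   PCI_EXT_CAP_ID_VNDR)))
 *		handle_vsec(dev, pos);
 */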

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);
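
/*
 * Example (hypothetical caller): using the DSN as a stable device
 * identifier, assuming a valid 'pdev':
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *	if (dsn)
 *		pci_info(pdev, "serial number %016llx\n", dsn);
 */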

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}
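
/*
 * Example (a sketch): waiting for a PCIe function's outstanding
 * transactions to drain, much as a Function Level Reset path would:
 *
 *	if (!pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
 *				  PCI_EXP_DEVSTA_TRPND))
 *		pci_warn(dev, "transactions still pending\n");
 */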

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;

/**
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * For only devices specified in the disable_acs_redir parameter.
 */
static void pci_disable_acs_redir(struct pci_dev *dev)
{
	int ret = 0;
	const char *p;
	int pos;
	u16 ctrl;

	if (!disable_acs_redir_param)
		return;

	p = disable_acs_redir_param;
	while (*p) {
		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
				     disable_acs_redir_param);

			break;
		} else if (ret == 1) {
			/* Found a match */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pos = dev->acs_cap;
	if (!pos) {
		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* P2P Request & Completion Redirect */
	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

	pci_info(dev, "disabled ACS redirect\n");
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
static void pci_std_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	/* Enable Translation Blocking for external devices */
	if (dev->external_facing || dev->untrusted)
		ctrl |= (cap & PCI_ACS_TB);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_enable_acs - enable ACS if the hardware supports it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
	if (!pci_acs_enable)
		goto disable_acs_redir;

	if (!pci_dev_specific_enable_acs(dev))
		goto disable_acs_redir;

	pci_std_enable_acs(dev);

disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled by
	 * platform firmware.  So if we are told to disable it, we should
	 * always disable it after setting the kernel's default
	 * preferences.
	 */
	pci_disable_acs_redir(dev);
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state  || !ops->get_state ||
	    !ops->choose_state  || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->refresh_state)
		pci_platform_pm->refresh_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->bridge_d3)
		return pci_platform_pm->bridge_d3(dev);
	return false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate transition: We can enter D0 from any state, but if
	 * we're already in a low-power state, we can only go deeper.  E.g.,
	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
	 * we'd have to go from D3 to D0, then to D1.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (pmcsr == (u16) ~0) {
		pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EIO;
	}

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		fallthrough;	/* force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see PCI PM 1.1
	 * 5.6.1 table 18
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
			 pci_power_name(dev->current_state),
			 pci_power_name(state));

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_refresh_power_state(dev);

	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	u32 id;

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 data to complete the read (except when
	 * CRS SV is enabled and the read was for the Vendor ID; in that
	 * case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (id == ~0) {
		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > PCI_RESET_WAIT)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);

		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}

	if (delay > PCI_RESET_WAIT)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 */
int pci_power_up(struct pci_dev *dev)
{
	pci_platform_power_transition(dev, PCI_D0);

	/*
	 * Mandatory power management transition delays are handled in
	 * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
	 * corresponding bridge.
	 */
	if (dev->runtime_d3cold) {
		/*
		 * When powering on a bridge from D3cold, the whole hierarchy
		 * may be powered on into D0uninitialized state, resume them to
		 * give them a chance to suspend again
		 */
		pci_wakeup_bus(dev->subordinate);
	}

	return pci_raw_set_power_state(dev, PCI_D0);
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_power_up(dev);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (pci_platform_power_transition(dev, state))
		return error;

	/* Powering off a bridge may power off the whole hierarchy */
	if (state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);

	return 0;
}
EXPORT_SYMBOL(pci_set_power_state);
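
/*
 * Example (hypothetical legacy suspend path, a sketch only):
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 *
 * and the mirror-image resume path:
 *
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 */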

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	   that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		pci_info(dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
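
/*
 * Example (hypothetical legacy .suspend() callback, a sketch only):
 * let the platform pick the target state for the system transition:
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 */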

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u16 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++) {
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
			i * 4, dev->saved_config_space[i]);
	}
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	pci_save_aer_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
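
/*
 * Example (a sketch): drivers commonly bracket a device reset with a
 * save/restore pair so that config space survives the reset:
 *
 *	pci_save_state(pdev);
 *	(issue the reset here)
 *	pci_restore_state(pdev);
 */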
1569 
pci_restore_config_dword(struct pci_dev * pdev,int offset,u32 saved_val,int retry,bool force)1570 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1571 				     u32 saved_val, int retry, bool force)
1572 {
1573 	u32 val;
1574 
1575 	pci_read_config_dword(pdev, offset, &val);
1576 	if (!force && val == saved_val)
1577 		return;
1578 
1579 	for (;;) {
1580 		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1581 			offset, val, saved_val);
1582 		pci_write_config_dword(pdev, offset, saved_val);
1583 		if (retry-- <= 0)
1584 			return;
1585 
1586 		pci_read_config_dword(pdev, offset, &val);
1587 		if (val == saved_val)
1588 			return;
1589 
1590 		mdelay(1);
1591 	}
1592 }
1593 
pci_restore_config_space_range(struct pci_dev * pdev,int start,int end,int retry,bool force)1594 static void pci_restore_config_space_range(struct pci_dev *pdev,
1595 					   int start, int end, int retry,
1596 					   bool force)
1597 {
1598 	int index;
1599 
1600 	for (index = end; index >= start; index--)
1601 		pci_restore_config_dword(pdev, 4 * index,
1602 					 pdev->saved_config_space[index],
1603 					 retry, force);
1604 }
1605 
pci_restore_config_space(struct pci_dev * pdev)1606 static void pci_restore_config_space(struct pci_dev *pdev)
1607 {
1608 	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1609 		pci_restore_config_space_range(pdev, 10, 15, 0, false);
1610 		/* Restore BARs before the command register. */
1611 		pci_restore_config_space_range(pdev, 4, 9, 10, false);
1612 		pci_restore_config_space_range(pdev, 0, 3, 0, false);
1613 	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1614 		pci_restore_config_space_range(pdev, 12, 15, 0, false);
1615 
1616 		/*
1617 		 * Force rewriting of prefetch registers to avoid S3 resume
1618 		 * issues on Intel PCI bridges that occur when these
1619 		 * registers are not explicitly written.
1620 		 */
1621 		pci_restore_config_space_range(pdev, 9, 11, 0, true);
1622 		pci_restore_config_space_range(pdev, 0, 8, 0, false);
1623 	} else {
1624 		pci_restore_config_space_range(pdev, 0, 15, 0, false);
1625 	}
1626 }
1627 
pci_restore_rebar_state(struct pci_dev * pdev)1628 static void pci_restore_rebar_state(struct pci_dev *pdev)
1629 {
1630 	unsigned int pos, nbars, i;
1631 	u32 ctrl;
1632 
1633 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1634 	if (!pos)
1635 		return;
1636 
1637 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1638 	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
1639 		    PCI_REBAR_CTRL_NBAR_SHIFT;
1640 
1641 	for (i = 0; i < nbars; i++, pos += 8) {
1642 		struct resource *res;
1643 		int bar_idx, size;
1644 
1645 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1646 		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1647 		res = pdev->resource + bar_idx;
1648 		size = ilog2(resource_size(res)) - 20;
1649 		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1650 		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
1651 		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1652 	}
1653 }
1654 
1655 /**
1656  * pci_restore_state - Restore the saved state of a PCI device
1657  * @dev: PCI device that we're dealing with
1658  */
pci_restore_state(struct pci_dev * dev)1659 void pci_restore_state(struct pci_dev *dev)
1660 {
1661 	if (!dev->state_saved)
1662 		return;
1663 
1664 	/*
1665 	 * Restore max latencies (in the LTR capability) before enabling
1666 	 * LTR itself (in the PCIe capability).
1667 	 */
1668 	pci_restore_ltr_state(dev);
1669 
1670 	pci_restore_pcie_state(dev);
1671 	pci_restore_pasid_state(dev);
1672 	pci_restore_pri_state(dev);
1673 	pci_restore_ats_state(dev);
1674 	pci_restore_vc_state(dev);
1675 	pci_restore_rebar_state(dev);
1676 	pci_restore_dpc_state(dev);
1677 
1678 	pci_aer_clear_status(dev);
1679 	pci_restore_aer_state(dev);
1680 
1681 	pci_restore_config_space(dev);
1682 
1683 	pci_restore_pcix_state(dev);
1684 	pci_restore_msi_state(dev);
1685 
1686 	/* Restore ACS and IOV configuration state */
1687 	pci_enable_acs(dev);
1688 	pci_restore_iov_state(dev);
1689 
1690 	dev->state_saved = false;
1691 }
1692 EXPORT_SYMBOL(pci_restore_state);
1693 
1694 struct pci_saved_state {
1695 	u32 config_space[16];
1696 	struct pci_cap_saved_data cap[];
1697 };
1698 
1699 /**
1700  * pci_store_saved_state - Allocate and return an opaque struct containing
1701  *			   the device saved state.
1702  * @dev: PCI device that we're dealing with
1703  *
1704  * Return NULL if no state or error.
1705  */
pci_store_saved_state(struct pci_dev * dev)1706 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1707 {
1708 	struct pci_saved_state *state;
1709 	struct pci_cap_saved_state *tmp;
1710 	struct pci_cap_saved_data *cap;
1711 	size_t size;
1712 
1713 	if (!dev->state_saved)
1714 		return NULL;
1715 
1716 	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1717 
1718 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1719 		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1720 
1721 	state = kzalloc(size, GFP_KERNEL);
1722 	if (!state)
1723 		return NULL;
1724 
1725 	memcpy(state->config_space, dev->saved_config_space,
1726 	       sizeof(state->config_space));
1727 
1728 	cap = state->cap;
1729 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1730 		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1731 		memcpy(cap, &tmp->cap, len);
1732 		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1733 	}
1734 	/* An all-zero entry (size == 0) terminates the list */
1735 
1736 	return state;
1737 }
1738 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1739 
1740 /**
1741  * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1742  * @dev: PCI device that we're dealing with
1743  * @state: Saved state returned from pci_store_saved_state()
1744  */
1745 int pci_load_saved_state(struct pci_dev *dev,
1746 			 struct pci_saved_state *state)
1747 {
1748 	struct pci_cap_saved_data *cap;
1749 
1750 	dev->state_saved = false;
1751 
1752 	if (!state)
1753 		return 0;
1754 
1755 	memcpy(dev->saved_config_space, state->config_space,
1756 	       sizeof(state->config_space));
1757 
1758 	cap = state->cap;
1759 	while (cap->size) {
1760 		struct pci_cap_saved_state *tmp;
1761 
1762 		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1763 		if (!tmp || tmp->cap.size != cap->size)
1764 			return -EINVAL;
1765 
1766 		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1767 		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1768 		       sizeof(struct pci_cap_saved_data) + cap->size);
1769 	}
1770 
1771 	dev->state_saved = true;
1772 	return 0;
1773 }
1774 EXPORT_SYMBOL_GPL(pci_load_saved_state);
1775 
1776 /**
1777  * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1778  *				   and free the memory allocated for it.
1779  * @dev: PCI device that we're dealing with
1780  * @state: Pointer to saved state returned from pci_store_saved_state()
1781  */
1782 int pci_load_and_free_saved_state(struct pci_dev *dev,
1783 				  struct pci_saved_state **state)
1784 {
1785 	int ret = pci_load_saved_state(dev, *state);
1786 	kfree(*state);
1787 	*state = NULL;
1788 	return ret;
1789 }
1790 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
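
/*
 * Example (sketch): the store/load helpers are typically used to
 * capture a known-good snapshot of the saved state at probe time and
 * reapply it later, e.g. after the device has been handed to a guest.
 * The vdev->saved field is a placeholder.
 *
 *	pci_save_state(pdev);
 *	vdev->saved = pci_store_saved_state(pdev);
 *	...
 *	pci_load_and_free_saved_state(pdev, &vdev->saved);
 *	pci_restore_state(pdev);
 */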
1791 
1792 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1793 {
1794 	return pci_enable_resources(dev, bars);
1795 }
1796 
1797 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1798 {
1799 	int err;
1800 	struct pci_dev *bridge;
1801 	u16 cmd;
1802 	u8 pin;
1803 
1804 	err = pci_set_power_state(dev, PCI_D0);
1805 	if (err < 0 && err != -EIO)
1806 		return err;
1807 
1808 	bridge = pci_upstream_bridge(dev);
1809 	if (bridge)
1810 		pcie_aspm_powersave_config_link(bridge);
1811 
1812 	err = pcibios_enable_device(dev, bars);
1813 	if (err < 0)
1814 		return err;
1815 	pci_fixup_device(pci_fixup_enable, dev);
1816 
1817 	if (dev->msi_enabled || dev->msix_enabled)
1818 		return 0;
1819 
1820 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1821 	if (pin) {
1822 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
1823 		if (cmd & PCI_COMMAND_INTX_DISABLE)
1824 			pci_write_config_word(dev, PCI_COMMAND,
1825 					      cmd & ~PCI_COMMAND_INTX_DISABLE);
1826 	}
1827 
1828 	return 0;
1829 }
1830 
1831 /**
1832  * pci_reenable_device - Resume abandoned device
1833  * @dev: PCI device to be resumed
1834  *
1835  * NOTE: This function is a backend of pci_default_resume() and is not supposed
1836  * to be called by normal code; write a proper resume handler and use it instead.
1837  */
1838 int pci_reenable_device(struct pci_dev *dev)
1839 {
1840 	if (pci_is_enabled(dev))
1841 		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1842 	return 0;
1843 }
1844 EXPORT_SYMBOL(pci_reenable_device);
1845 
1846 static void pci_enable_bridge(struct pci_dev *dev)
1847 {
1848 	struct pci_dev *bridge;
1849 	int retval;
1850 
1851 	bridge = pci_upstream_bridge(dev);
1852 	if (bridge)
1853 		pci_enable_bridge(bridge);
1854 
1855 	if (pci_is_enabled(dev)) {
1856 		if (!dev->is_busmaster)
1857 			pci_set_master(dev);
1858 		return;
1859 	}
1860 
1861 	retval = pci_enable_device(dev);
1862 	if (retval)
1863 		pci_err(dev, "Error enabling bridge (%d), continuing\n",
1864 			retval);
1865 	pci_set_master(dev);
1866 }
1867 
1868 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1869 {
1870 	struct pci_dev *bridge;
1871 	int err;
1872 	int i, bars = 0;
1873 
1874 	/*
1875 	 * Power state could be unknown at this point, either due to a fresh
1876 	 * boot or a device removal call.  So get the current power state
1877 	 * so that things like MSI message writing will behave as expected
1878 	 * (e.g. if the device really is in D0 at enable time).
1879 	 */
1880 	pci_update_current_state(dev, dev->current_state);
1881 
1882 	if (atomic_inc_return(&dev->enable_cnt) > 1)
1883 		return 0;		/* already enabled */
1884 
1885 	bridge = pci_upstream_bridge(dev);
1886 	if (bridge)
1887 		pci_enable_bridge(bridge);
1888 
1889 	/* Walk all resources, skipping only the SR-IOV ranges */
1890 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1891 		if (dev->resource[i].flags & flags)
1892 			bars |= (1 << i);
1893 	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1894 		if (dev->resource[i].flags & flags)
1895 			bars |= (1 << i);
1896 
1897 	err = do_pci_enable_device(dev, bars);
1898 	if (err < 0)
1899 		atomic_dec(&dev->enable_cnt);
1900 	return err;
1901 }
1902 
1903 /**
1904  * pci_enable_device_io - Initialize a device for use with IO space
1905  * @dev: PCI device to be initialized
1906  *
1907  * Initialize device before it's used by a driver. Ask low-level code
1908  * to enable I/O resources. Wake up the device if it was suspended.
1909  * Beware, this function can fail.
1910  */
1911 int pci_enable_device_io(struct pci_dev *dev)
1912 {
1913 	return pci_enable_device_flags(dev, IORESOURCE_IO);
1914 }
1915 EXPORT_SYMBOL(pci_enable_device_io);
1916 
1917 /**
1918  * pci_enable_device_mem - Initialize a device for use with Memory space
1919  * @dev: PCI device to be initialized
1920  *
1921  * Initialize device before it's used by a driver. Ask low-level code
1922  * to enable Memory resources. Wake up the device if it was suspended.
1923  * Beware, this function can fail.
1924  */
1925 int pci_enable_device_mem(struct pci_dev *dev)
1926 {
1927 	return pci_enable_device_flags(dev, IORESOURCE_MEM);
1928 }
1929 EXPORT_SYMBOL(pci_enable_device_mem);
1930 
1931 /**
1932  * pci_enable_device - Initialize device before it's used by a driver.
1933  * @dev: PCI device to be initialized
1934  *
1935  * Initialize device before it's used by a driver. Ask low-level code
1936  * to enable I/O and memory. Wake up the device if it was suspended.
1937  * Beware, this function can fail.
1938  *
1939  * Note we don't actually enable the device many times if we call
1940  * this function repeatedly (we just increment the count).
1941  */
1942 int pci_enable_device(struct pci_dev *dev)
1943 {
1944 	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1945 }
1946 EXPORT_SYMBOL(pci_enable_device);
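
/*
 * Example (sketch): the classic unmanaged probe sequence built on
 * pci_enable_device().  All foo_* names are placeholders.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		err = pci_request_regions(pdev, "foo");
 *		if (err)
 *			goto err_disable;
 *		pci_set_master(pdev);
 *		return 0;
 *
 *	err_disable:
 *		pci_disable_device(pdev);
 *		return err;
 *	}
 */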
1947 
1948 /*
1949  * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
1950  * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
1951  * there's no need to track it separately.  pci_devres is initialized
1952  * when a device is enabled using the managed PCI device enable interface.
1953  */
1954 struct pci_devres {
1955 	unsigned int enabled:1;
1956 	unsigned int pinned:1;
1957 	unsigned int orig_intx:1;
1958 	unsigned int restore_intx:1;
1959 	unsigned int mwi:1;
1960 	u32 region_mask;
1961 };
1962 
1963 static void pcim_release(struct device *gendev, void *res)
1964 {
1965 	struct pci_dev *dev = to_pci_dev(gendev);
1966 	struct pci_devres *this = res;
1967 	int i;
1968 
1969 	if (dev->msi_enabled)
1970 		pci_disable_msi(dev);
1971 	if (dev->msix_enabled)
1972 		pci_disable_msix(dev);
1973 
1974 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1975 		if (this->region_mask & (1 << i))
1976 			pci_release_region(dev, i);
1977 
1978 	if (this->mwi)
1979 		pci_clear_mwi(dev);
1980 
1981 	if (this->restore_intx)
1982 		pci_intx(dev, this->orig_intx);
1983 
1984 	if (this->enabled && !this->pinned)
1985 		pci_disable_device(dev);
1986 }
1987 
1988 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1989 {
1990 	struct pci_devres *dr, *new_dr;
1991 
1992 	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1993 	if (dr)
1994 		return dr;
1995 
1996 	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1997 	if (!new_dr)
1998 		return NULL;
1999 	return devres_get(&pdev->dev, new_dr, NULL, NULL);
2000 }
2001 
2002 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
2003 {
2004 	if (pci_is_managed(pdev))
2005 		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
2006 	return NULL;
2007 }
2008 
2009 /**
2010  * pcim_enable_device - Managed pci_enable_device()
2011  * @pdev: PCI device to be initialized
2012  *
2013  * Managed pci_enable_device().
2014  */
2015 int pcim_enable_device(struct pci_dev *pdev)
2016 {
2017 	struct pci_devres *dr;
2018 	int rc;
2019 
2020 	dr = get_pci_dr(pdev);
2021 	if (unlikely(!dr))
2022 		return -ENOMEM;
2023 	if (dr->enabled)
2024 		return 0;
2025 
2026 	rc = pci_enable_device(pdev);
2027 	if (!rc) {
2028 		pdev->is_managed = 1;
2029 		dr->enabled = 1;
2030 	}
2031 	return rc;
2032 }
2033 EXPORT_SYMBOL(pcim_enable_device);
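
/*
 * Example (sketch): with the managed variant, the error and detach
 * paths need no explicit pci_disable_device(); devres undoes the
 * enable automatically.  foo_probe is a placeholder.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		void __iomem *regs;
 *		int err = pcim_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		regs = pcim_iomap(pdev, 0, 0);
 *		return regs ? 0 : -ENOMEM;
 *	}
 */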
2034 
2035 /**
2036  * pcim_pin_device - Pin managed PCI device
2037  * @pdev: PCI device to pin
2038  *
2039  * Pin managed PCI device @pdev.  Pinned device won't be disabled on
2040  * driver detach.  @pdev must have been enabled with
2041  * pcim_enable_device().
2042  */
2043 void pcim_pin_device(struct pci_dev *pdev)
2044 {
2045 	struct pci_devres *dr;
2046 
2047 	dr = find_pci_dr(pdev);
2048 	WARN_ON(!dr || !dr->enabled);
2049 	if (dr)
2050 		dr->pinned = 1;
2051 }
2052 EXPORT_SYMBOL(pcim_pin_device);
2053 
2054 /**
2055  * pcibios_add_device - provide arch specific hooks when adding device dev
2056  * @dev: the PCI device being added
2057  *
2058  * Permits the platform to provide architecture specific functionality when
2059  * devices are added. This is the default implementation. Architecture
2060  * implementations can override this.
2061  */
2062 int __weak pcibios_add_device(struct pci_dev *dev)
2063 {
2064 	return 0;
2065 }
2066 
2067 /**
2068  * pcibios_release_device - provide arch specific hooks when releasing
2069  *			    device dev
2070  * @dev: the PCI device being released
2071  *
2072  * Permits the platform to provide architecture specific functionality when
2073  * devices are released. This is the default implementation. Architecture
2074  * implementations can override this.
2075  */
2076 void __weak pcibios_release_device(struct pci_dev *dev) {}
2077 
2078 /**
2079  * pcibios_disable_device - disable arch specific PCI resources for device dev
2080  * @dev: the PCI device to disable
2081  *
2082  * Disables architecture specific PCI resources for the device. This
2083  * is the default implementation. Architecture implementations can
2084  * override this.
2085  */
2086 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2087 
2088 /**
2089  * pcibios_penalize_isa_irq - penalize an ISA IRQ
2090  * @irq: ISA IRQ to penalize
2091  * @active: IRQ active or not
2092  *
2093  * Permits the platform to provide architecture-specific functionality when
2094  * penalizing ISA IRQs. This is the default implementation. Architecture
2095  * implementations can override this.
2096  */
2097 void __weak pcibios_penalize_isa_irq(int irq, int active) {}
2098 
2099 static void do_pci_disable_device(struct pci_dev *dev)
2100 {
2101 	u16 pci_command;
2102 
2103 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2104 	if (pci_command & PCI_COMMAND_MASTER) {
2105 		pci_command &= ~PCI_COMMAND_MASTER;
2106 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
2107 	}
2108 
2109 	pcibios_disable_device(dev);
2110 }
2111 
2112 /**
2113  * pci_disable_enabled_device - Disable device without updating enable_cnt
2114  * @dev: PCI device to disable
2115  *
2116  * NOTE: This function is a backend of PCI power management routines and is
2117  * not supposed to be called by drivers.
2118  */
2119 void pci_disable_enabled_device(struct pci_dev *dev)
2120 {
2121 	if (pci_is_enabled(dev))
2122 		do_pci_disable_device(dev);
2123 }
2124 
2125 /**
2126  * pci_disable_device - Disable PCI device after use
2127  * @dev: PCI device to be disabled
2128  *
2129  * Signal to the system that the PCI device is not in use by the system
2130  * anymore.  This only involves disabling PCI bus-mastering, if active.
2131  *
2132  * Note we don't actually disable the device until all callers of
2133  * pci_enable_device() have called pci_disable_device().
2134  */
2135 void pci_disable_device(struct pci_dev *dev)
2136 {
2137 	struct pci_devres *dr;
2138 
2139 	dr = find_pci_dr(dev);
2140 	if (dr)
2141 		dr->enabled = 0;
2142 
2143 	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2144 		      "disabling already-disabled device");
2145 
2146 	if (atomic_dec_return(&dev->enable_cnt) != 0)
2147 		return;
2148 
2149 	do_pci_disable_device(dev);
2150 
2151 	dev->is_busmaster = 0;
2152 }
2153 EXPORT_SYMBOL(pci_disable_device);
2154 
2155 /**
2156  * pcibios_set_pcie_reset_state - set reset state for device dev
2157  * @dev: the PCIe device to reset
2158  * @state: Reset state to enter into
2159  *
2160  * Set the PCIe reset state for the device. This is the default
2161  * implementation. Architecture implementations can override this.
2162  */
2163 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2164 					enum pcie_reset_state state)
2165 {
2166 	return -EINVAL;
2167 }
2168 
2169 /**
2170  * pci_set_pcie_reset_state - set reset state for device dev
2171  * @dev: the PCIe device to reset
2172  * @state: Reset state to enter into
2173  *
2174  * Sets the PCI reset state for the device.
2175  */
2176 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2177 {
2178 	return pcibios_set_pcie_reset_state(dev, state);
2179 }
2180 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2181 
2182 void pcie_clear_device_status(struct pci_dev *dev)
2183 {
2184 	u16 sta;
2185 
2186 	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2187 	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2188 }
2189 
2190 /**
2191  * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2192  * @dev: PCIe root port or event collector.
2193  */
2194 void pcie_clear_root_pme_status(struct pci_dev *dev)
2195 {
2196 	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2197 }
2198 
2199 /**
2200  * pci_check_pme_status - Check if given device has generated PME.
2201  * @dev: Device to check.
2202  *
2203  * Check the PME status of the device and if set, clear it and clear PME enable
2204  * (if set).  Return 'true' if PME status and PME enable were both set or
2205  * 'false' otherwise.
2206  */
2207 bool pci_check_pme_status(struct pci_dev *dev)
2208 {
2209 	int pmcsr_pos;
2210 	u16 pmcsr;
2211 	bool ret = false;
2212 
2213 	if (!dev->pm_cap)
2214 		return false;
2215 
2216 	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2217 	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2218 	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2219 		return false;
2220 
2221 	/* Clear PME status. */
2222 	pmcsr |= PCI_PM_CTRL_PME_STATUS;
2223 	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2224 		/* Disable PME to avoid interrupt flood. */
2225 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2226 		ret = true;
2227 	}
2228 
2229 	pci_write_config_word(dev, pmcsr_pos, pmcsr);
2230 
2231 	return ret;
2232 }
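
/*
 * Worked example of the RW1C handling above (illustrative value):
 * reading pmcsr = 0x8100 means PME_Status (bit 15) and PME_En (bit 8)
 * are both set.  The code keeps bit 15 set, clears bit 8, and writes
 * 0x8000 back; since the status bit is write-1-to-clear, the device
 * ends up with no pending PME and PME disabled, and the function
 * returns 'true'.
 */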
2233 
2234 /**
2235  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2236  * @dev: Device to handle.
2237  * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2238  *
2239  * Check if @dev has generated PME and queue a resume request for it in that
2240  * case.
2241  */
2242 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2243 {
2244 	if (pme_poll_reset && dev->pme_poll)
2245 		dev->pme_poll = false;
2246 
2247 	if (pci_check_pme_status(dev)) {
2248 		pci_wakeup_event(dev);
2249 		pm_request_resume(&dev->dev);
2250 	}
2251 	return 0;
2252 }
2253 
2254 /**
2255  * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2256  * @bus: Top bus of the subtree to walk.
2257  */
2258 void pci_pme_wakeup_bus(struct pci_bus *bus)
2259 {
2260 	if (bus)
2261 		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2262 }
2263 
2264 
2265 /**
2266  * pci_pme_capable - check the capability of PCI device to generate PME#
2267  * @dev: PCI device to handle.
2268  * @state: PCI state from which device will issue PME#.
2269  */
2270 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2271 {
2272 	if (!dev->pm_cap)
2273 		return false;
2274 
2275 	return !!(dev->pme_support & (1 << state));
2276 }
2277 EXPORT_SYMBOL(pci_pme_capable);
2278 
2279 static void pci_pme_list_scan(struct work_struct *work)
2280 {
2281 	struct pci_pme_device *pme_dev, *n;
2282 
2283 	mutex_lock(&pci_pme_list_mutex);
2284 	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2285 		if (pme_dev->dev->pme_poll) {
2286 			struct pci_dev *bridge;
2287 
2288 			bridge = pme_dev->dev->bus->self;
2289 			/*
2290 			 * If the bridge is in a low-power state, the
2291 			 * configuration space of subordinate devices
2292 			 * may not be accessible.
2293 			 */
2294 			if (bridge && bridge->current_state != PCI_D0)
2295 				continue;
2296 			/*
2297 			 * If the device is in D3cold it should not be
2298 			 * polled either.
2299 			 */
2300 			if (pme_dev->dev->current_state == PCI_D3cold)
2301 				continue;
2302 
2303 			pci_pme_wakeup(pme_dev->dev, NULL);
2304 		} else {
2305 			list_del(&pme_dev->list);
2306 			kfree(pme_dev);
2307 		}
2308 	}
2309 	if (!list_empty(&pci_pme_list))
2310 		queue_delayed_work(system_freezable_wq, &pci_pme_work,
2311 				   msecs_to_jiffies(PME_TIMEOUT));
2312 	mutex_unlock(&pci_pme_list_mutex);
2313 }
2314 
2315 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2316 {
2317 	u16 pmcsr;
2318 
2319 	if (!dev->pme_support)
2320 		return;
2321 
2322 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2323 	/* Clear PME_Status by writing 1 to it and enable PME# */
2324 	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2325 	if (!enable)
2326 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2327 
2328 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2329 }
2330 
2331 /**
2332  * pci_pme_restore - Restore PME configuration after config space restore.
2333  * @dev: PCI device to update.
2334  */
2335 void pci_pme_restore(struct pci_dev *dev)
2336 {
2337 	u16 pmcsr;
2338 
2339 	if (!dev->pme_support)
2340 		return;
2341 
2342 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2343 	if (dev->wakeup_prepared) {
2344 		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2345 		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2346 	} else {
2347 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2348 		pmcsr |= PCI_PM_CTRL_PME_STATUS;
2349 	}
2350 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2351 }
2352 
2353 /**
2354  * pci_pme_active - enable or disable PCI device's PME# function
2355  * @dev: PCI device to handle.
2356  * @enable: 'true' to enable PME# generation; 'false' to disable it.
2357  *
2358  * The caller must verify that the device is capable of generating PME# before
2359  * calling this function with @enable equal to 'true'.
2360  */
2361 void pci_pme_active(struct pci_dev *dev, bool enable)
2362 {
2363 	__pci_pme_active(dev, enable);
2364 
2365 	/*
2366 	 * PCI (as opposed to PCIe) PME requires that the device have
2367 	 * its PME# line hooked up correctly. Not all hardware vendors
2368 	 * do this, so the PME never gets delivered and the device
2369 	 * remains asleep. The easiest way around this is to
2370 	 * periodically walk the list of suspended devices and check
2371 	 * whether any have their PME flag set. The assumption is that
2372 	 * we'll wake up often enough anyway that this won't be a huge
2373 	 * hit, and the power savings from the devices will still be a
2374 	 * win.
2375 	 *
2376 	 * Although PCIe uses an in-band PME message instead of the PME# line
2377 	 * to report PME, PME does not work for some PCIe devices in
2378 	 * reality.  For example, there are devices that set their PME
2379 	 * status bits, but don't really bother to send a PME message;
2380 	 * there are PCI Express Root Ports that don't bother to
2381 	 * trigger interrupts when they receive PME messages from the
2382 	 * devices below.  So PME poll is used for PCIe devices too.
2383 	 */
2384 
2385 	if (dev->pme_poll) {
2386 		struct pci_pme_device *pme_dev;
2387 		if (enable) {
2388 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
2389 					  GFP_KERNEL);
2390 			if (!pme_dev) {
2391 				pci_warn(dev, "can't enable PME#\n");
2392 				return;
2393 			}
2394 			pme_dev->dev = dev;
2395 			mutex_lock(&pci_pme_list_mutex);
2396 			list_add(&pme_dev->list, &pci_pme_list);
2397 			if (list_is_singular(&pci_pme_list))
2398 				queue_delayed_work(system_freezable_wq,
2399 						   &pci_pme_work,
2400 						   msecs_to_jiffies(PME_TIMEOUT));
2401 			mutex_unlock(&pci_pme_list_mutex);
2402 		} else {
2403 			mutex_lock(&pci_pme_list_mutex);
2404 			list_for_each_entry(pme_dev, &pci_pme_list, list) {
2405 				if (pme_dev->dev == dev) {
2406 					list_del(&pme_dev->list);
2407 					kfree(pme_dev);
2408 					break;
2409 				}
2410 			}
2411 			mutex_unlock(&pci_pme_list_mutex);
2412 		}
2413 	}
2414 
2415 	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2416 }
2417 EXPORT_SYMBOL(pci_pme_active);
2418 
2419 /**
2420  * __pci_enable_wake - enable PCI device as wakeup event source
2421  * @dev: PCI device affected
2422  * @state: PCI state from which device will issue wakeup events
2423  * @enable: True to enable event generation; false to disable
2424  *
2425  * This enables the device as a wakeup event source, or disables it.
2426  * When such events involve platform-specific hooks, those hooks are
2427  * called automatically by this routine.
2428  *
2429  * Devices with legacy power management (no standard PCI PM capabilities)
2430  * always require such platform hooks.
2431  *
2432  * RETURN VALUE:
2433  * 0 is returned on success
2434  * -EINVAL is returned if device is not supposed to wake up the system
2435  * Error code depending on the platform is returned if both the platform and
2436  * the native mechanism fail to enable the generation of wake-up events
2437  */
2438 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2439 {
2440 	int ret = 0;
2441 
2442 	/*
2443 	 * Bridges that are not themselves power-manageable only signal
2444 	 * wakeup on behalf of subordinate devices, which is set up
2445 	 * elsewhere, so skip them. However, bridges that are
2446 	 * power-manageable may signal wakeup for themselves (for example,
2447 	 * on a hotplug event) and they need to be covered here.
2448 	 */
2449 	if (!pci_power_manageable(dev))
2450 		return 0;
2451 
2452 	/* Don't do the same thing twice in a row for one device. */
2453 	if (!!enable == !!dev->wakeup_prepared)
2454 		return 0;
2455 
2456 	/*
2457 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2458 	 * Anderson we should be doing PME# wake enable followed by ACPI wake
2459 	 * enable.  To disable wake-up we call the platform first, for symmetry.
2460 	 */
2461 
2462 	if (enable) {
2463 		int error;
2464 
2465 		/*
2466 		 * Enable PME signaling if the device can signal PME from
2467 		 * D3cold regardless of whether or not it can signal PME from
2468 		 * the current target state, because that will allow it to
2469 		 * signal PME when the hierarchy above it goes into D3cold and
2470 		 * the device itself ends up in D3cold as a result of that.
2471 		 */
2472 		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2473 			pci_pme_active(dev, true);
2474 		else
2475 			ret = 1;
2476 		error = platform_pci_set_wakeup(dev, true);
2477 		if (ret)
2478 			ret = error;
2479 		if (!ret)
2480 			dev->wakeup_prepared = true;
2481 	} else {
2482 		platform_pci_set_wakeup(dev, false);
2483 		pci_pme_active(dev, false);
2484 		dev->wakeup_prepared = false;
2485 	}
2486 
2487 	return ret;
2488 }
2489 
2490 /**
2491  * pci_enable_wake - change wakeup settings for a PCI device
2492  * @pci_dev: Target device
2493  * @state: PCI state from which device will issue wakeup events
2494  * @enable: Whether or not to enable event generation
2495  *
2496  * If @enable is set, check device_may_wakeup() for the device before calling
2497  * __pci_enable_wake() for it.
2498  */
2499 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2500 {
2501 	if (enable && !device_may_wakeup(&pci_dev->dev))
2502 		return -EINVAL;
2503 
2504 	return __pci_enable_wake(pci_dev, state, enable);
2505 }
2506 EXPORT_SYMBOL(pci_enable_wake);
2507 
2508 /**
2509  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2510  * @dev: PCI device to prepare
2511  * @enable: True to enable wake-up event generation; false to disable
2512  *
2513  * Many drivers want the device to wake up the system from D3_hot or D3_cold
2514  * and this function allows them to set that up cleanly - pci_enable_wake()
2515  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2516  * ordering constraints.
2517  *
2518  * This function only returns error code if the device is not allowed to wake
2519  * up the system from sleep or it is not capable of generating PME# from both
2520  * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2521  */
2522 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2523 {
2524 	return pci_pme_capable(dev, PCI_D3cold) ?
2525 			pci_enable_wake(dev, PCI_D3cold, enable) :
2526 			pci_enable_wake(dev, PCI_D3hot, enable);
2527 }
2528 EXPORT_SYMBOL(pci_wake_from_d3);
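
/*
 * Example (sketch): a network driver's suspend path might key the
 * wake-up configuration off the user-visible wakeup policy.  The
 * foo_* names are placeholders.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		foo_enable_wol(pdev);
 *		return pci_wake_from_d3(pdev, device_may_wakeup(dev));
 *	}
 */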
2529 
2530 /**
2531  * pci_target_state - find an appropriate low power state for a given PCI dev
2532  * @dev: PCI device
2533  * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2534  *
2535  * Use underlying platform code to find a supported low power state for @dev.
2536  * If the platform can't manage @dev, return the deepest state from which it
2537  * can generate wake events, based on any available PME info.
2538  */
2539 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2540 {
2541 	pci_power_t target_state = PCI_D3hot;
2542 
2543 	if (platform_pci_power_manageable(dev)) {
2544 		/*
2545 		 * Call the platform to find the target state for the device.
2546 		 */
2547 		pci_power_t state = platform_pci_choose_state(dev);
2548 
2549 		switch (state) {
2550 		case PCI_POWER_ERROR:
2551 		case PCI_UNKNOWN:
2552 			break;
2553 		case PCI_D1:
2554 		case PCI_D2:
2555 			if (pci_no_d1d2(dev))
2556 				break;
2557 			fallthrough;
2558 		default:
2559 			target_state = state;
2560 		}
2561 
2562 		return target_state;
2563 	}
2564 
2565 	if (!dev->pm_cap)
2566 		target_state = PCI_D0;
2567 
2568 	/*
2569 	 * If the device is in D3cold even though it's not power-manageable by
2570 	 * the platform, it may have been powered down by non-standard means.
2571 	 * Best to let it slumber.
2572 	 */
2573 	if (dev->current_state == PCI_D3cold)
2574 		target_state = PCI_D3cold;
2575 
2576 	if (wakeup && dev->pme_support) {
2577 		pci_power_t state = target_state;
2578 
2579 		/*
2580 		 * Find the deepest state from which the device can generate
2581 		 * PME#.
2582 		 */
2583 		while (state && !(dev->pme_support & (1 << state)))
2584 			state--;
2585 
2586 		if (state)
2587 			return state;
2588 		else if (dev->pme_support & 1)
2589 			return PCI_D0;
2590 	}
2591 
2592 	return target_state;
2593 }
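
/*
 * Worked example of the PME descent above (illustrative): with
 * dev->pme_support = 0x19 (PME from D0, D3hot and D3cold) and a
 * platform-chosen target of D2, the loop steps D2 -> D1 -> D0 and
 * exits with state == 0; since bit 0 is set, PCI_D0 is returned as
 * the deepest state no deeper than the target from which the device
 * can still signal PME#.
 */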
2594 
2595 /**
2596  * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2597  *			  into a sleep state
2598  * @dev: Device to handle.
2599  *
2600  * Choose the power state appropriate for the device depending on whether
2601  * it can wake up the system and/or is power manageable by the platform
2602  * (PCI_D3hot is the default) and put the device into that state.
2603  */
2604 int pci_prepare_to_sleep(struct pci_dev *dev)
2605 {
2606 	bool wakeup = device_may_wakeup(&dev->dev);
2607 	pci_power_t target_state = pci_target_state(dev, wakeup);
2608 	int error;
2609 
2610 	if (target_state == PCI_POWER_ERROR)
2611 		return -EIO;
2612 
2613 	pci_enable_wake(dev, target_state, wakeup);
2614 
2615 	error = pci_set_power_state(dev, target_state);
2616 
2617 	if (error)
2618 		pci_enable_wake(dev, target_state, false);
2619 
2620 	return error;
2621 }
2622 EXPORT_SYMBOL(pci_prepare_to_sleep);
2623 
2624 /**
2625  * pci_back_from_sleep - turn PCI device on during system-wide transition
2626  *			 into working state
2627  * @dev: Device to handle.
2628  *
2629  * Disable device's system wake-up capability and put it into D0.
2630  */
2631 int pci_back_from_sleep(struct pci_dev *dev)
2632 {
2633 	pci_enable_wake(dev, PCI_D0, false);
2634 	return pci_set_power_state(dev, PCI_D0);
2635 }
2636 EXPORT_SYMBOL(pci_back_from_sleep);
2637 
2638 /**
2639  * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2640  * @dev: PCI device being suspended.
2641  *
2642  * Prepare @dev to generate wake-up events at run time and put it into a low
2643  * power state.
2644  */
2645 int pci_finish_runtime_suspend(struct pci_dev *dev)
2646 {
2647 	pci_power_t target_state;
2648 	int error;
2649 
2650 	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2651 	if (target_state == PCI_POWER_ERROR)
2652 		return -EIO;
2653 
2654 	dev->runtime_d3cold = target_state == PCI_D3cold;
2655 
2656 	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2657 
2658 	error = pci_set_power_state(dev, target_state);
2659 
2660 	if (error) {
2661 		pci_enable_wake(dev, target_state, false);
2662 		dev->runtime_d3cold = false;
2663 	}
2664 
2665 	return error;
2666 }
2667 
2668 /**
2669  * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2670  * @dev: Device to check.
2671  *
2672  * Return true if the device itself is capable of generating wake-up events
2673  * (through the platform or using the native PCIe PME) or if the device supports
2674  * PME and one of its upstream bridges can generate wake-up events.
2675  */
2676 bool pci_dev_run_wake(struct pci_dev *dev)
2677 {
2678 	struct pci_bus *bus = dev->bus;
2679 
2680 	if (!dev->pme_support)
2681 		return false;
2682 
2683 	/* PME-capable in principle, but not from the target power state */
2684 	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2685 		return false;
2686 
2687 	if (device_can_wakeup(&dev->dev))
2688 		return true;
2689 
2690 	while (bus->parent) {
2691 		struct pci_dev *bridge = bus->self;
2692 
2693 		if (device_can_wakeup(&bridge->dev))
2694 			return true;
2695 
2696 		bus = bus->parent;
2697 	}
2698 
2699 	/* We have reached the root bus. */
2700 	if (bus->bridge)
2701 		return device_can_wakeup(bus->bridge);
2702 
2703 	return false;
2704 }
2705 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2706 
2707 /**
2708  * pci_dev_need_resume - Check if it is necessary to resume the device.
2709  * @pci_dev: Device to check.
2710  *
2711  * Return 'true' if the device is not runtime-suspended, if it has to be
2712  * reconfigured due to a difference in wakeup settings between system and
2713  * runtime suspend, or if its current power state is not suitable for the
2714  * upcoming (system-wide) transition.
2715  */
2716 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2717 {
2718 	struct device *dev = &pci_dev->dev;
2719 	pci_power_t target_state;
2720 
2721 	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2722 		return true;
2723 
2724 	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2725 
2726 	/*
2727 	 * If the earlier platform check has not triggered, D3cold is just power
2728 	 * removal on top of D3hot, so no need to resume the device in that
2729 	 * case.
2730 	 */
2731 	return target_state != pci_dev->current_state &&
2732 		target_state != PCI_D3cold &&
2733 		pci_dev->current_state != PCI_D3hot;
2734 }
2735 
2736 /**
2737  * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2738  * @pci_dev: Device to check.
2739  *
2740  * If the device is suspended and it is not configured for system wakeup,
2741  * disable PME for it to prevent it from waking up the system unnecessarily.
2742  *
2743  * Note that if the device's power state is D3cold and the platform check in
2744  * pci_dev_need_resume() has not triggered, the device's configuration need not
2745  * be changed.
2746  */
2747 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2748 {
2749 	struct device *dev = &pci_dev->dev;
2750 
2751 	spin_lock_irq(&dev->power.lock);
2752 
2753 	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2754 	    pci_dev->current_state < PCI_D3cold)
2755 		__pci_pme_active(pci_dev, false);
2756 
2757 	spin_unlock_irq(&dev->power.lock);
2758 }
2759 
2760 /**
2761  * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2762  * @pci_dev: Device to handle.
2763  *
2764  * If the device is runtime suspended and wakeup-capable, enable PME for it as
2765  * it might have been disabled during the prepare phase of system suspend if
2766  * the device was not configured for system wakeup.
2767  */
2768 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2769 {
2770 	struct device *dev = &pci_dev->dev;
2771 
2772 	if (!pci_dev_run_wake(pci_dev))
2773 		return;
2774 
2775 	spin_lock_irq(&dev->power.lock);
2776 
2777 	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2778 		__pci_pme_active(pci_dev, true);
2779 
2780 	spin_unlock_irq(&dev->power.lock);
2781 }
2782 
2783 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2784 {
2785 	struct device *dev = &pdev->dev;
2786 	struct device *parent = dev->parent;
2787 
2788 	if (parent)
2789 		pm_runtime_get_sync(parent);
2790 	pm_runtime_get_noresume(dev);
2791 	/*
2792 	 * pdev->current_state is set to PCI_D3cold while the device is
2793 	 * suspending, so wait until the suspend completes.
2794 	 */
2795 	pm_runtime_barrier(dev);
2796 	/*
2797 	 * Only need to resume devices in D3cold, because config
2798 	 * registers are still accessible for devices suspended but
2799 	 * not in D3cold.
2800 	 */
2801 	if (pdev->current_state == PCI_D3cold)
2802 		pm_runtime_resume(dev);
2803 }
2804 
2805 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2806 {
2807 	struct device *dev = &pdev->dev;
2808 	struct device *parent = dev->parent;
2809 
2810 	pm_runtime_put(dev);
2811 	if (parent)
2812 		pm_runtime_put_sync(parent);
2813 }
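
/*
 * Example (sketch): these two helpers bracket config space accesses
 * from contexts that cannot assume the device is powered, e.g. sysfs
 * show() handlers:
 *
 *	u32 val;
 *
 *	pci_config_pm_runtime_get(pdev);
 *	pci_read_config_dword(pdev, pos, &val);
 *	pci_config_pm_runtime_put(pdev);
 */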
2814 
2815 static const struct dmi_system_id bridge_d3_blacklist[] = {
2816 #ifdef CONFIG_X86
2817 	{
2818 		/*
2819 		 * Gigabyte X299 root port is not marked as hotplug capable,
2820 		 * which allows Linux to power manage it.  However, this
2821 		 * confuses the BIOS SMI handler so don't power manage root
2822 		 * ports on that system.
2823 		 */
2824 		.ident = "X299 DESIGNARE EX-CF",
2825 		.matches = {
2826 			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2827 			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2828 		},
2829 	},
2830 	{
2831 		/*
2832 		 * Downstream device is not accessible after putting a root port
2833 		 * into D3cold and back into D0 on the Elo Continental Z2 board
2834 		 */
2835 		.ident = "Elo Continental Z2",
2836 		.matches = {
2837 			DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
2838 			DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
2839 			DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
2840 		},
2841 	},
2842 #endif
2843 	{ }
2844 };
2845 
2846 /**
2847  * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2848  * @bridge: Bridge to check
2849  *
2850  * This function checks if it is possible to move the bridge to D3.
2851  * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
2852  */
2853 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2854 {
2855 	if (!pci_is_pcie(bridge))
2856 		return false;
2857 
2858 	switch (pci_pcie_type(bridge)) {
2859 	case PCI_EXP_TYPE_ROOT_PORT:
2860 	case PCI_EXP_TYPE_UPSTREAM:
2861 	case PCI_EXP_TYPE_DOWNSTREAM:
2862 		if (pci_bridge_d3_disable)
2863 			return false;
2864 
2865 		/*
2866 		 * Hotplug ports handled by firmware in System Management Mode
2867 		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2868 		 */
2869 		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
2870 			return false;
2871 
2872 		if (pci_bridge_d3_force)
2873 			return true;
2874 
2875 		/* Even the oldest 2010 Thunderbolt controller supports D3. */
2876 		if (bridge->is_thunderbolt)
2877 			return true;
2878 
2879 		/* Platform might know better if the bridge supports D3 */
2880 		if (platform_pci_bridge_d3(bridge))
2881 			return true;
2882 
2883 		/*
2884 		 * Hotplug ports handled natively by the OS were not validated
2885 		 * by vendors for runtime D3 at least until 2018 because there
2886 		 * was no OS support.
2887 		 */
2888 		if (bridge->is_hotplug_bridge)
2889 			return false;
2890 
2891 		if (dmi_check_system(bridge_d3_blacklist))
2892 			return false;
2893 
2894 		/*
2895 		 * It should be safe to put PCIe ports from 2015 or newer
2896 		 * into D3.
2897 		 */
2898 		if (dmi_get_bios_year() >= 2015)
2899 			return true;
2900 		break;
2901 	}
2902 
2903 	return false;
2904 }
2905 
2906 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2907 {
2908 	bool *d3cold_ok = data;
2909 
2910 	if (/* The device needs to be allowed to go to D3cold ... */
2911 	    dev->no_d3cold || !dev->d3cold_allowed ||
2912 
2913 	    /* ... and if it is wakeup capable to do so from D3cold. */
2914 	    (device_may_wakeup(&dev->dev) &&
2915 	     !pci_pme_capable(dev, PCI_D3cold)) ||
2916 
2917 	    /* If it is a bridge it must be allowed to go to D3. */
2918 	    !pci_power_manageable(dev))
2919 
2920 		*d3cold_ok = false;
2921 
2922 	return !*d3cold_ok;
2923 }
2924 
2925 /**
2926  * pci_bridge_d3_update - Update bridge D3 capabilities
2927  * @dev: PCI device which is changed
2928  *
2929  * Update upstream bridge PM capabilities accordingly depending on if the
2930  * device PM configuration was changed or the device is being removed.  The
2931  * change is also propagated upstream.
2932  */
2933 void pci_bridge_d3_update(struct pci_dev *dev)
2934 {
2935 	bool remove = !device_is_registered(&dev->dev);
2936 	struct pci_dev *bridge;
2937 	bool d3cold_ok = true;
2938 
2939 	bridge = pci_upstream_bridge(dev);
2940 	if (!bridge || !pci_bridge_d3_possible(bridge))
2941 		return;
2942 
2943 	/*
2944 	 * If D3 is currently allowed for the bridge, removing one of its
2945 	 * children won't change that.
2946 	 */
2947 	if (remove && bridge->bridge_d3)
2948 		return;
2949 
2950 	/*
2951 	 * If D3 is currently allowed for the bridge and a child is added or
2952 	 * changed, disallowance of D3 can only be caused by that child, so
2953 	 * we only need to check that single device, not any of its siblings.
2954 	 *
2955 	 * If D3 is currently not allowed for the bridge, checking the device
2956 	 * first may allow us to skip checking its siblings.
2957 	 */
2958 	if (!remove)
2959 		pci_dev_check_d3cold(dev, &d3cold_ok);
2960 
2961 	/*
2962 	 * If D3 is currently not allowed for the bridge, this may be caused
2963 	 * either by the device being changed/removed or any of its siblings,
2964 	 * so we need to go through all children to find out if one of them
2965 	 * continues to block D3.
2966 	 */
2967 	if (d3cold_ok && !bridge->bridge_d3)
2968 		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2969 			     &d3cold_ok);
2970 
2971 	if (bridge->bridge_d3 != d3cold_ok) {
2972 		bridge->bridge_d3 = d3cold_ok;
2973 		/* Propagate change to upstream bridges */
2974 		pci_bridge_d3_update(bridge);
2975 	}
2976 }
2977 
2978 /**
2979  * pci_d3cold_enable - Enable D3cold for device
2980  * @dev: PCI device to handle
2981  *
2982  * This function can be used in drivers to enable D3cold from the device
2983  * they handle.  It also updates upstream PCI bridge PM capabilities
2984  * accordingly.
2985  */
2986 void pci_d3cold_enable(struct pci_dev *dev)
2987 {
2988 	if (dev->no_d3cold) {
2989 		dev->no_d3cold = false;
2990 		pci_bridge_d3_update(dev);
2991 	}
2992 }
2993 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2994 
2995 /**
2996  * pci_d3cold_disable - Disable D3cold for device
2997  * @dev: PCI device to handle
2998  *
2999  * This function can be used in drivers to disable D3cold from the device
3000  * they handle.  It also updates upstream PCI bridge PM capabilities
3001  * accordingly.
3002  */
3003 void pci_d3cold_disable(struct pci_dev *dev)
3004 {
3005 	if (!dev->no_d3cold) {
3006 		dev->no_d3cold = true;
3007 		pci_bridge_d3_update(dev);
3008 	}
3009 }
3010 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
3011 
3012 /**
3013  * pci_pm_init - Initialize PM functions of given PCI device
3014  * @dev: PCI device to handle.
3015  */
3016 void pci_pm_init(struct pci_dev *dev)
3017 {
3018 	int pm;
3019 	u16 status;
3020 	u16 pmc;
3021 
3022 	pm_runtime_forbid(&dev->dev);
3023 	pm_runtime_set_active(&dev->dev);
3024 	pm_runtime_enable(&dev->dev);
3025 	device_enable_async_suspend(&dev->dev);
3026 	dev->wakeup_prepared = false;
3027 
3028 	dev->pm_cap = 0;
3029 	dev->pme_support = 0;
3030 
3031 	/* find PCI PM capability in list */
3032 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3033 	if (!pm)
3034 		return;
3035 	/* Check device's ability to generate PME# */
3036 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3037 
3038 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3039 		pci_err(dev, "unsupported PM cap regs version (%u)\n",
3040 			pmc & PCI_PM_CAP_VER_MASK);
3041 		return;
3042 	}
3043 
3044 	dev->pm_cap = pm;
3045 	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3046 	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3047 	dev->bridge_d3 = pci_bridge_d3_possible(dev);
3048 	dev->d3cold_allowed = true;
3049 
3050 	dev->d1_support = false;
3051 	dev->d2_support = false;
3052 	if (!pci_no_d1d2(dev)) {
3053 		if (pmc & PCI_PM_CAP_D1)
3054 			dev->d1_support = true;
3055 		if (pmc & PCI_PM_CAP_D2)
3056 			dev->d2_support = true;
3057 
3058 		if (dev->d1_support || dev->d2_support)
3059 			pci_info(dev, "supports%s%s\n",
3060 				   dev->d1_support ? " D1" : "",
3061 				   dev->d2_support ? " D2" : "");
3062 	}
3063 
3064 	pmc &= PCI_PM_CAP_PME_MASK;
3065 	if (pmc) {
3066 		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3067 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3068 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3069 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3070 			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3071 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3072 		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
3073 		dev->pme_poll = true;
3074 		/*
3075 		 * Make device's PM flags reflect the wake-up capability, but
3076 		 * let the user space enable it to wake up the system as needed.
3077 		 */
3078 		device_set_wakeup_capable(&dev->dev, true);
3079 		/* Disable the PME# generation functionality */
3080 		pci_pme_active(dev, false);
3081 	}
3082 
3083 	pci_read_config_word(dev, PCI_STATUS, &status);
3084 	if (status & PCI_STATUS_IMM_READY)
3085 		dev->imm_ready = 1;
3086 }
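
/*
 * Worked example of the PMC decode above (illustrative value): with
 * pmc = 0xC803, the version field (bits 2:0) is 3 and the PME_Support
 * field is (0xC803 & 0xF800) >> 11 = 0x19, i.e. bits for D0, D3hot
 * and D3cold; pme_support becomes 0x19 and "PME# supported from
 * D0 D3hot D3cold" is logged.
 */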
3087 
3088 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3089 {
3090 	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3091 
3092 	switch (prop) {
3093 	case PCI_EA_P_MEM:
3094 	case PCI_EA_P_VF_MEM:
3095 		flags |= IORESOURCE_MEM;
3096 		break;
3097 	case PCI_EA_P_MEM_PREFETCH:
3098 	case PCI_EA_P_VF_MEM_PREFETCH:
3099 		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3100 		break;
3101 	case PCI_EA_P_IO:
3102 		flags |= IORESOURCE_IO;
3103 		break;
3104 	default:
3105 		return 0;
3106 	}
3107 
3108 	return flags;
3109 }
3110 
3111 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3112 					    u8 prop)
3113 {
3114 	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3115 		return &dev->resource[bei];
3116 #ifdef CONFIG_PCI_IOV
3117 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3118 		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3119 		return &dev->resource[PCI_IOV_RESOURCES +
3120 				      bei - PCI_EA_BEI_VF_BAR0];
3121 #endif
3122 	else if (bei == PCI_EA_BEI_ROM)
3123 		return &dev->resource[PCI_ROM_RESOURCE];
3124 	else
3125 		return NULL;
3126 }
3127 
3128 /* Read an Enhanced Allocation (EA) entry */
3129 static int pci_ea_read(struct pci_dev *dev, int offset)
3130 {
3131 	struct resource *res;
3132 	int ent_size, ent_offset = offset;
3133 	resource_size_t start, end;
3134 	unsigned long flags;
3135 	u32 dw0, bei, base, max_offset;
3136 	u8 prop;
3137 	bool support_64 = (sizeof(resource_size_t) >= 8);
3138 
3139 	pci_read_config_dword(dev, ent_offset, &dw0);
3140 	ent_offset += 4;
3141 
3142 	/* Entry size field indicates DWORDs after 1st */
3143 	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3144 
3145 	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3146 		goto out;
3147 
3148 	bei = (dw0 & PCI_EA_BEI) >> 4;
3149 	prop = (dw0 & PCI_EA_PP) >> 8;
3150 
3151 	/*
3152 	 * If the Property is in the reserved range, try the Secondary
3153 	 * Property instead.
3154 	 */
3155 	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3156 		prop = (dw0 & PCI_EA_SP) >> 16;
3157 	if (prop > PCI_EA_P_BRIDGE_IO)
3158 		goto out;
3159 
3160 	res = pci_ea_get_resource(dev, bei, prop);
3161 	if (!res) {
3162 		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3163 		goto out;
3164 	}
3165 
3166 	flags = pci_ea_flags(dev, prop);
3167 	if (!flags) {
3168 		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3169 		goto out;
3170 	}
3171 
3172 	/* Read Base */
3173 	pci_read_config_dword(dev, ent_offset, &base);
3174 	start = (base & PCI_EA_FIELD_MASK);
3175 	ent_offset += 4;
3176 
3177 	/* Read MaxOffset */
3178 	pci_read_config_dword(dev, ent_offset, &max_offset);
3179 	ent_offset += 4;
3180 
3181 	/* Read Base MSBs (if 64-bit entry) */
3182 	if (base & PCI_EA_IS_64) {
3183 		u32 base_upper;
3184 
3185 		pci_read_config_dword(dev, ent_offset, &base_upper);
3186 		ent_offset += 4;
3187 
3188 		flags |= IORESOURCE_MEM_64;
3189 
3190 		/* entry starts above 32-bit boundary, can't use */
3191 		if (!support_64 && base_upper)
3192 			goto out;
3193 
3194 		if (support_64)
3195 			start |= ((u64)base_upper << 32);
3196 	}
3197 
3198 	end = start + (max_offset | 0x03);
3199 
3200 	/* Read MaxOffset MSBs (if 64-bit entry) */
3201 	if (max_offset & PCI_EA_IS_64) {
3202 		u32 max_offset_upper;
3203 
3204 		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3205 		ent_offset += 4;
3206 
3207 		flags |= IORESOURCE_MEM_64;
3208 
3209 		/* entry too big, can't use */
3210 		if (!support_64 && max_offset_upper)
3211 			goto out;
3212 
3213 		if (support_64)
3214 			end += ((u64)max_offset_upper << 32);
3215 	}
3216 
3217 	if (end < start) {
3218 		pci_err(dev, "EA Entry crosses address boundary\n");
3219 		goto out;
3220 	}
3221 
3222 	if (ent_size != ent_offset - offset) {
3223 		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3224 			ent_size, ent_offset - offset);
3225 		goto out;
3226 	}
3227 
3228 	res->name = pci_name(dev);
3229 	res->start = start;
3230 	res->end = end;
3231 	res->flags = flags;
3232 
3233 	if (bei <= PCI_EA_BEI_BAR5)
3234 		pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3235 			   bei, res, prop);
3236 	else if (bei == PCI_EA_BEI_ROM)
3237 		pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3238 			   res, prop);
3239 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3240 		pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3241 			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
3242 	else
3243 		pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3244 			   bei, res, prop);
3245 
3246 out:
3247 	return offset + ent_size;
3248 }
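
/*
 * Worked example of an EA entry (illustrative): for a 64-bit BAR 0
 * entry, dw0 has PCI_EA_ENABLE set, BEI = 0 and ES = 4, so ent_size =
 * (4 + 1) * 4 = 20 bytes: dw0, Base, MaxOffset, Base MSBs and
 * MaxOffset MSBs.  With Base = 0xE0000000 and MaxOffset = 0x000FFFFC
 * (the low two bits are flags and are OR'd back to 0x3), the resource
 * becomes [0xE0000000, 0xE00FFFFF], a 1 MB region.
 */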
3249 
3250 /* Enhanced Allocation Initialization */
3251 void pci_ea_init(struct pci_dev *dev)
3252 {
3253 	int ea;
3254 	u8 num_ent;
3255 	int offset;
3256 	int i;
3257 
3258 	/* find PCI EA capability in list */
3259 	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3260 	if (!ea)
3261 		return;
3262 
3263 	/* determine the number of entries */
3264 	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3265 					&num_ent);
3266 	num_ent &= PCI_EA_NUM_ENT_MASK;
3267 
3268 	offset = ea + PCI_EA_FIRST_ENT;
3269 
3270 	/* Skip DWORD 2 for type 1 functions */
3271 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3272 		offset += 4;
3273 
3274 	/* parse each EA entry */
3275 	for (i = 0; i < num_ent; ++i)
3276 		offset = pci_ea_read(dev, offset);
3277 }
3278 
3279 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3280 	struct pci_cap_saved_state *new_cap)
3281 {
3282 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3283 }
3284 
3285 /**
3286  * _pci_add_cap_save_buffer - allocate buffer for saving given
3287  *			      capability registers
3288  * @dev: the PCI device
3289  * @cap: the capability to allocate the buffer for
3290  * @extended: Standard or Extended capability ID
3291  * @size: requested size of the buffer
3292  */
3293 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3294 				    bool extended, unsigned int size)
3295 {
3296 	int pos;
3297 	struct pci_cap_saved_state *save_state;
3298 
3299 	if (extended)
3300 		pos = pci_find_ext_capability(dev, cap);
3301 	else
3302 		pos = pci_find_capability(dev, cap);
3303 
3304 	if (!pos)
3305 		return 0;
3306 
3307 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3308 	if (!save_state)
3309 		return -ENOMEM;
3310 
3311 	save_state->cap.cap_nr = cap;
3312 	save_state->cap.cap_extended = extended;
3313 	save_state->cap.size = size;
3314 	pci_add_saved_cap(dev, save_state);
3315 
3316 	return 0;
3317 }
3318 
3319 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3320 {
3321 	return _pci_add_cap_save_buffer(dev, cap, false, size);
3322 }
3323 
3324 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3325 {
3326 	return _pci_add_cap_save_buffer(dev, cap, true, size);
3327 }
3328 
3329 /**
3330  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3331  * @dev: the PCI device
3332  */
3333 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3334 {
3335 	int error;
3336 
3337 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3338 					PCI_EXP_SAVE_REGS * sizeof(u16));
3339 	if (error)
3340 		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3341 
3342 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3343 	if (error)
3344 		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3345 
3346 	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3347 					    2 * sizeof(u16));
3348 	if (error)
3349 		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3350 
3351 	pci_allocate_vc_save_buffers(dev);
3352 }
3353 
3354 void pci_free_cap_save_buffers(struct pci_dev *dev)
3355 {
3356 	struct pci_cap_saved_state *tmp;
3357 	struct hlist_node *n;
3358 
3359 	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3360 		kfree(tmp);
3361 }
3362 
3363 /**
3364  * pci_configure_ari - enable or disable ARI forwarding
3365  * @dev: the PCI device
3366  *
3367  * If @dev and its upstream bridge both support ARI, enable ARI in the
3368  * bridge.  Otherwise, disable ARI in the bridge.
3369  */
3370 void pci_configure_ari(struct pci_dev *dev)
3371 {
3372 	u32 cap;
3373 	struct pci_dev *bridge;
3374 
3375 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3376 		return;
3377 
3378 	bridge = dev->bus->self;
3379 	if (!bridge)
3380 		return;
3381 
3382 	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3383 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
3384 		return;
3385 
3386 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3387 		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3388 					 PCI_EXP_DEVCTL2_ARI);
3389 		bridge->ari_enabled = 1;
3390 	} else {
3391 		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3392 					   PCI_EXP_DEVCTL2_ARI);
3393 		bridge->ari_enabled = 0;
3394 	}
3395 }
3396 
3397 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3398 {
3399 	int pos;
3400 	u16 cap, ctrl;
3401 
3402 	pos = pdev->acs_cap;
3403 	if (!pos)
3404 		return false;
3405 
3406 	/*
3407 	 * Except for egress control, capabilities are either required
3408 	 * or only required if controllable.  Features missing from the
3409 	 * capability field can therefore be assumed as hard-wired enabled.
3410 	 */
3411 	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3412 	acs_flags &= (cap | PCI_ACS_EC);
3413 
3414 	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3415 	return (ctrl & acs_flags) == acs_flags;
3416 }
3417 
3418 /**
3419  * pci_acs_enabled - test ACS against required flags for a given device
3420  * @pdev: device to test
3421  * @acs_flags: required PCI ACS flags
3422  *
3423  * Return true if the device supports the provided flags.  Automatically
3424  * filters out flags that are not implemented on multifunction devices.
3425  *
3426  * Note that this interface checks the effective ACS capabilities of the
3427  * device rather than the actual capabilities.  For instance, most single
3428  * function endpoints are not required to support ACS because they have no
3429  * opportunity for peer-to-peer access.  We therefore return 'true'
3430  * regardless of whether the device exposes an ACS capability.  This makes
3431  * it much easier for callers of this function to ignore the actual type
3432  * or topology of the device when testing ACS support.
3433  */
3434 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3435 {
3436 	int ret;
3437 
3438 	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3439 	if (ret >= 0)
3440 		return ret > 0;
3441 
3442 	/*
3443 	 * Conventional PCI and PCI-X devices never support ACS, either
3444 	 * effectively or actually.  The shared bus topology implies that
3445 	 * any device on the bus can receive or snoop DMA.
3446 	 */
3447 	if (!pci_is_pcie(pdev))
3448 		return false;
3449 
3450 	switch (pci_pcie_type(pdev)) {
3451 	/*
3452 	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3453 	 * but since their primary interface is PCI/X, we conservatively
3454 	 * handle them as we would a non-PCIe device.
3455 	 */
3456 	case PCI_EXP_TYPE_PCIE_BRIDGE:
3457 	/*
3458 	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
3459 	 * applicable... must never implement an ACS Extended Capability...".
3460 	 * This seems arbitrary, but we take a conservative interpretation
3461 	 * of this statement.
3462 	 */
3463 	case PCI_EXP_TYPE_PCI_BRIDGE:
3464 	case PCI_EXP_TYPE_RC_EC:
3465 		return false;
3466 	/*
3467 	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3468 	 * implement ACS in order to indicate their peer-to-peer capabilities,
3469 	 * regardless of whether they are single- or multi-function devices.
3470 	 */
3471 	case PCI_EXP_TYPE_DOWNSTREAM:
3472 	case PCI_EXP_TYPE_ROOT_PORT:
3473 		return pci_acs_flags_enabled(pdev, acs_flags);
3474 	/*
3475 	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3476 	 * implemented by the remaining PCIe types to indicate peer-to-peer
3477 	 * capabilities, but only when they are part of a multifunction
3478 	 * device.  The footnote for section 6.12 indicates the specific
3479 	 * PCIe types included here.
3480 	 */
3481 	case PCI_EXP_TYPE_ENDPOINT:
3482 	case PCI_EXP_TYPE_UPSTREAM:
3483 	case PCI_EXP_TYPE_LEG_END:
3484 	case PCI_EXP_TYPE_RC_END:
3485 		if (!pdev->multifunction)
3486 			break;
3487 
3488 		return pci_acs_flags_enabled(pdev, acs_flags);
3489 	}
3490 
3491 	/*
3492 	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3493 	 * to single function devices with the exception of downstream ports.
3494 	 */
3495 	return true;
3496 }
3497 
3498 /**
3499  * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3500  * @start: starting downstream device
3501  * @end: ending upstream device or NULL to search to the root bus
3502  * @acs_flags: required flags
3503  *
3504  * Walk up a device tree from start to end testing PCI ACS support.  If
3505  * any step along the way does not support the required flags, return false.
3506  */
3507 bool pci_acs_path_enabled(struct pci_dev *start,
3508 			  struct pci_dev *end, u16 acs_flags)
3509 {
3510 	struct pci_dev *pdev, *parent = start;
3511 
3512 	do {
3513 		pdev = parent;
3514 
3515 		if (!pci_acs_enabled(pdev, acs_flags))
3516 			return false;
3517 
3518 		if (pci_is_root_bus(pdev->bus))
3519 			return (end == NULL);
3520 
3521 		parent = pdev->bus->self;
3522 	} while (pdev != end);
3523 
3524 	return true;
3525 }
3526 
3527 /**
3528  * pci_acs_init - Initialize ACS if hardware supports it
3529  * @dev: the PCI device
3530  */
3531 void pci_acs_init(struct pci_dev *dev)
3532 {
3533 	dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3534 
3535 	/*
3536 	 * Attempt to enable ACS regardless of capability because some Root
3537 	 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3538 	 * the standard ACS capability but still support ACS via those
3539 	 * quirks.
3540 	 */
3541 	pci_enable_acs(dev);
3542 }
3543 
3544 /**
3545  * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3546  * @pdev: PCI device
3547  * @bar: BAR to find
3548  *
3549  * Helper to find the position of the ctrl register for a BAR.
3550  * Returns -ENOTSUPP if resizable BARs are not supported at all.
3551  * Returns -ENOENT if no ctrl register for the BAR could be found.
3552  */
3553 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3554 {
3555 	unsigned int pos, nbars, i;
3556 	u32 ctrl;
3557 
3558 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3559 	if (!pos)
3560 		return -ENOTSUPP;
3561 
3562 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3563 	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3564 		    PCI_REBAR_CTRL_NBAR_SHIFT;
3565 
3566 	for (i = 0; i < nbars; i++, pos += 8) {
3567 		int bar_idx;
3568 
3569 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3570 		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3571 		if (bar_idx == bar)
3572 			return pos;
3573 	}
3574 
3575 	return -ENOENT;
3576 }
3577 
3578 /**
3579  * pci_rebar_get_possible_sizes - get possible sizes for BAR
3580  * @pdev: PCI device
3581  * @bar: BAR to query
3582  *
3583  * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3584  * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3585  */
3586 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3587 {
3588 	int pos;
3589 	u32 cap;
3590 
3591 	pos = pci_rebar_find_pos(pdev, bar);
3592 	if (pos < 0)
3593 		return 0;
3594 
3595 	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3596 	cap &= PCI_REBAR_CAP_SIZES;
3597 
3598 	/* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3599 	if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3600 	    bar == 0 && cap == 0x7000)
3601 		cap = 0x3f000;
3602 
3603 	return cap >> 4;
3604 }
3605 
3606 /**
3607  * pci_rebar_get_current_size - get the current size of a BAR
3608  * @pdev: PCI device
3609  * @bar: BAR to query
3610  *
3611  * Read the size of a BAR from the resizable BAR config.
3612  * Returns size if found or negative error code.
3613  */
3614 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3615 {
3616 	int pos;
3617 	u32 ctrl;
3618 
3619 	pos = pci_rebar_find_pos(pdev, bar);
3620 	if (pos < 0)
3621 		return pos;
3622 
3623 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3624 	return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3625 }
3626 
3627 /**
3628  * pci_rebar_set_size - set a new size for a BAR
3629  * @pdev: PCI device
3630  * @bar: BAR to set size to
3631  * @size: new size as defined in the spec (0=1MB, 19=512GB)
3632  *
3633  * Set the new size of a BAR as defined in the spec.
3634  * Returns zero if resizing was successful, error code otherwise.
3635  */
3636 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3637 {
3638 	int pos;
3639 	u32 ctrl;
3640 
3641 	pos = pci_rebar_find_pos(pdev, bar);
3642 	if (pos < 0)
3643 		return pos;
3644 
3645 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3646 	ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3647 	ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3648 	pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3649 	return 0;
3650 }
3651 
3652 /**
3653  * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3654  * @dev: the PCI device
3655  * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3656  *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3657  *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3658  *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3659  *
3660  * Return 0 if all upstream bridges support AtomicOp routing, egress
3661  * blocking is disabled on all upstream ports, and the root port supports
3662  * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3663  * AtomicOp completion), or negative otherwise.
3664  */
3665 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3666 {
3667 	struct pci_bus *bus = dev->bus;
3668 	struct pci_dev *bridge;
3669 	u32 cap, ctl2;
3670 
3671 	if (!pci_is_pcie(dev))
3672 		return -EINVAL;
3673 
3674 	/*
3675 	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3676 	 * AtomicOp requesters.  For now, we only support endpoints as
3677 	 * requesters and root ports as completers.  No endpoints as
3678 	 * completers, and no peer-to-peer.
3679 	 */
3680 
3681 	switch (pci_pcie_type(dev)) {
3682 	case PCI_EXP_TYPE_ENDPOINT:
3683 	case PCI_EXP_TYPE_LEG_END:
3684 	case PCI_EXP_TYPE_RC_END:
3685 		break;
3686 	default:
3687 		return -EINVAL;
3688 	}
3689 
3690 	while (bus->parent) {
3691 		bridge = bus->self;
3692 
3693 		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3694 
3695 		switch (pci_pcie_type(bridge)) {
3696 		/* Ensure switch ports support AtomicOp routing */
3697 		case PCI_EXP_TYPE_UPSTREAM:
3698 		case PCI_EXP_TYPE_DOWNSTREAM:
3699 			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3700 				return -EINVAL;
3701 			break;
3702 
3703 		/* Ensure root port supports all the sizes we care about */
3704 		case PCI_EXP_TYPE_ROOT_PORT:
3705 			if ((cap & cap_mask) != cap_mask)
3706 				return -EINVAL;
3707 			break;
3708 		}
3709 
3710 		/* Ensure upstream ports don't block AtomicOps on egress */
3711 		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3712 			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3713 						   &ctl2);
3714 			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3715 				return -EINVAL;
3716 		}
3717 
3718 		bus = bus->parent;
3719 	}
3720 
3721 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3722 				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3723 	return 0;
3724 }
3725 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
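
/*
 * Usage sketch (illustrative; mydrv_probe() is hypothetical): an
 * endpoint driver that wants to issue 64-bit AtomicOp requests to host
 * memory would enable routing to the root port from its probe path and
 * treat failure as a loss of the optimization, not of the device.
 *
 *	static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		if (pci_enable_atomic_ops_to_root(pdev,
 *						  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *			pci_info(pdev, "AtomicOps to root port not available\n");
 *		...
 *	}
 */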
3726 
3727 /**
3728  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3729  * @dev: the PCI device
3730  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3731  *
3732  * Perform INTx swizzling for a device behind one level of bridge.  This is
3733  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3734  * behind bridges on add-in cards.  For devices with ARI enabled, the slot
3735  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3736  * the PCI Express Base Specification, Revision 2.1).
3737  */
3738 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3739 {
3740 	int slot;
3741 
3742 	if (pci_ari_enabled(dev->bus))
3743 		slot = 0;
3744 	else
3745 		slot = PCI_SLOT(dev->devfn);
3746 
3747 	return (((pin - 1) + slot) % 4) + 1;
3748 }
3749 
3750 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3751 {
3752 	u8 pin;
3753 
3754 	pin = dev->pin;
3755 	if (!pin)
3756 		return -1;
3757 
3758 	while (!pci_is_root_bus(dev->bus)) {
3759 		pin = pci_swizzle_interrupt_pin(dev, pin);
3760 		dev = dev->bus->self;
3761 	}
3762 	*bridge = dev;
3763 	return pin;
3764 }
3765 
3766 /**
3767  * pci_common_swizzle - swizzle INTx all the way to root bridge
3768  * @dev: the PCI device
3769  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3770  *
3771  * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
3772  * bridges all the way up to a PCI root bus.
3773  */
3774 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3775 {
3776 	u8 pin = *pinp;
3777 
3778 	while (!pci_is_root_bus(dev->bus)) {
3779 		pin = pci_swizzle_interrupt_pin(dev, pin);
3780 		dev = dev->bus->self;
3781 	}
3782 	*pinp = pin;
3783 	return PCI_SLOT(dev->devfn);
3784 }
3785 EXPORT_SYMBOL_GPL(pci_common_swizzle);
3786 
3787 /**
3788  * pci_release_region - Release a PCI BAR
3789  * @pdev: PCI device whose resources were previously reserved by
3790  *	  pci_request_region()
3791  * @bar: BAR to release
3792  *
3793  * Releases the PCI I/O and memory resources previously reserved by a
3794  * successful call to pci_request_region().  Call this function only
3795  * after all use of the PCI regions has ceased.
3796  */
3797 void pci_release_region(struct pci_dev *pdev, int bar)
3798 {
3799 	struct pci_devres *dr;
3800 
3801 	if (pci_resource_len(pdev, bar) == 0)
3802 		return;
3803 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3804 		release_region(pci_resource_start(pdev, bar),
3805 				pci_resource_len(pdev, bar));
3806 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3807 		release_mem_region(pci_resource_start(pdev, bar),
3808 				pci_resource_len(pdev, bar));
3809 
3810 	dr = find_pci_dr(pdev);
3811 	if (dr)
3812 		dr->region_mask &= ~(1 << bar);
3813 }
3814 EXPORT_SYMBOL(pci_release_region);
3815 
3816 /**
3817  * __pci_request_region - Reserve PCI I/O and memory resource
3818  * @pdev: PCI device whose resources are to be reserved
3819  * @bar: BAR to be reserved
3820  * @res_name: Name to be associated with resource.
3821  * @exclusive: whether the region access is exclusive or not
3822  *
3823  * Mark the PCI region associated with PCI device @pdev BAR @bar as
3824  * being reserved by owner @res_name.  Do not access any
3825  * address inside the PCI regions unless this call returns
3826  * successfully.
3827  *
3828  * If @exclusive is set, then the region is marked so that userspace
3829  * is explicitly not allowed to map the resource via /dev/mem or
3830  * sysfs MMIO access.
3831  *
3832  * Returns 0 on success, or %EBUSY on error.  A warning
3833  * message is also printed on failure.
3834  */
3835 static int __pci_request_region(struct pci_dev *pdev, int bar,
3836 				const char *res_name, int exclusive)
3837 {
3838 	struct pci_devres *dr;
3839 
3840 	if (pci_resource_len(pdev, bar) == 0)
3841 		return 0;
3842 
3843 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3844 		if (!request_region(pci_resource_start(pdev, bar),
3845 			    pci_resource_len(pdev, bar), res_name))
3846 			goto err_out;
3847 	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3848 		if (!__request_mem_region(pci_resource_start(pdev, bar),
3849 					pci_resource_len(pdev, bar), res_name,
3850 					exclusive))
3851 			goto err_out;
3852 	}
3853 
3854 	dr = find_pci_dr(pdev);
3855 	if (dr)
3856 		dr->region_mask |= 1 << bar;
3857 
3858 	return 0;
3859 
3860 err_out:
3861 	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3862 		 &pdev->resource[bar]);
3863 	return -EBUSY;
3864 }
3865 
3866 /**
3867  * pci_request_region - Reserve PCI I/O and memory resource
3868  * @pdev: PCI device whose resources are to be reserved
3869  * @bar: BAR to be reserved
3870  * @res_name: Name to be associated with resource
3871  *
3872  * Mark the PCI region associated with PCI device @pdev BAR @bar as
3873  * being reserved by owner @res_name.  Do not access any
3874  * address inside the PCI regions unless this call returns
3875  * successfully.
3876  *
3877  * Returns 0 on success, or %EBUSY on error.  A warning
3878  * message is also printed on failure.
3879  */
3880 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3881 {
3882 	return __pci_request_region(pdev, bar, res_name, 0);
3883 }
3884 EXPORT_SYMBOL(pci_request_region);
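
/*
 * Usage sketch (illustrative; "mydrv" is a hypothetical owner name):
 * reserve a single BAR before mapping it, and release it when done.
 *
 *	if (pci_request_region(pdev, 0, "mydrv"))
 *		return -EBUSY;
 *	regs = pci_iomap(pdev, 0, 0);
 *	...
 *	pci_iounmap(pdev, regs);
 *	pci_release_region(pdev, 0);
 */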
3885 
3886 /**
3887  * pci_release_selected_regions - Release selected PCI I/O and memory resources
3888  * @pdev: PCI device whose resources were previously reserved
3889  * @bars: Bitmask of BARs to be released
3890  *
3891  * Release selected PCI I/O and memory resources previously reserved.
3892  * Call this function only after all use of the PCI regions has ceased.
3893  */
3894 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3895 {
3896 	int i;
3897 
3898 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
3899 		if (bars & (1 << i))
3900 			pci_release_region(pdev, i);
3901 }
3902 EXPORT_SYMBOL(pci_release_selected_regions);
3903 
3904 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3905 					  const char *res_name, int excl)
3906 {
3907 	int i;
3908 
3909 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
3910 		if (bars & (1 << i))
3911 			if (__pci_request_region(pdev, i, res_name, excl))
3912 				goto err_out;
3913 	return 0;
3914 
3915 err_out:
3916 	while (--i >= 0)
3917 		if (bars & (1 << i))
3918 			pci_release_region(pdev, i);
3919 
3920 	return -EBUSY;
3921 }
3922 
3923 
3924 /**
3925  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3926  * @pdev: PCI device whose resources are to be reserved
3927  * @bars: Bitmask of BARs to be requested
3928  * @res_name: Name to be associated with resource
3929  */
3930 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3931 				 const char *res_name)
3932 {
3933 	return __pci_request_selected_regions(pdev, bars, res_name, 0);
3934 }
3935 EXPORT_SYMBOL(pci_request_selected_regions);
3936 
3937 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3938 					   const char *res_name)
3939 {
3940 	return __pci_request_selected_regions(pdev, bars, res_name,
3941 			IORESOURCE_EXCLUSIVE);
3942 }
3943 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3944 
3945 /**
3946  * pci_release_regions - Release reserved PCI I/O and memory resources
3947  * @pdev: PCI device whose resources were previously reserved by
3948  *	  pci_request_regions()
3949  *
3950  * Releases all PCI I/O and memory resources previously reserved by a
3951  * successful call to pci_request_regions().  Call this function only
3952  * after all use of the PCI regions has ceased.
3953  */
3954 
3955 void pci_release_regions(struct pci_dev *pdev)
3956 {
3957 	pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
3958 }
3959 EXPORT_SYMBOL(pci_release_regions);
3960 
3961 /**
3962  * pci_request_regions - Reserve PCI I/O and memory resources
3963  * @pdev: PCI device whose resources are to be reserved
3964  * @res_name: Name to be associated with resource.
3965  *
3966  * Mark all PCI regions associated with PCI device @pdev as
3967  * being reserved by owner @res_name.  Do not access any
3968  * address inside the PCI regions unless this call returns
3969  * successfully.
3970  *
3971  * Returns 0 on success, or %EBUSY on error.  A warning
3972  * message is also printed on failure.
3973  */
3974 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3975 {
3976 	return pci_request_selected_regions(pdev,
3977 			((1 << PCI_STD_NUM_BARS) - 1), res_name);
3978 }
3979 EXPORT_SYMBOL(pci_request_regions);
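
/*
 * Usage sketch (illustrative; the error label is hypothetical): the
 * common probe-time pairing of device enable and region reservation.
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *
 *	err = pci_request_regions(pdev, "mydrv");
 *	if (err)
 *		goto err_disable;
 *	...
 * err_disable:
 *	pci_disable_device(pdev);
 */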
3980 
3981 /**
3982  * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3983  * @pdev: PCI device whose resources are to be reserved
3984  * @res_name: Name to be associated with resource.
3985  *
3986  * Mark all PCI regions associated with PCI device @pdev as being reserved
3987  * by owner @res_name.  Do not access any address inside the PCI regions
3988  * unless this call returns successfully.
3989  *
3990  * pci_request_regions_exclusive() will mark the region so that /dev/mem
3991  * and the sysfs MMIO access will not be allowed.
3992  *
3993  * Returns 0 on success, or %EBUSY on error.  A warning message is also
3994  * printed on failure.
3995  */
3996 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3997 {
3998 	return pci_request_selected_regions_exclusive(pdev,
3999 				((1 << PCI_STD_NUM_BARS) - 1), res_name);
4000 }
4001 EXPORT_SYMBOL(pci_request_regions_exclusive);
4002 
4003 /*
4004  * Record the PCI IO range (expressed as CPU physical address + size).
4005  * Return a negative value if an error has occurred, zero otherwise.
4006  */
4007 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4008 			resource_size_t	size)
4009 {
4010 	int ret = 0;
4011 #ifdef PCI_IOBASE
4012 	struct logic_pio_hwaddr *range;
4013 
4014 	if (!size || addr + size < addr)
4015 		return -EINVAL;
4016 
4017 	range = kzalloc(sizeof(*range), GFP_ATOMIC);
4018 	if (!range)
4019 		return -ENOMEM;
4020 
4021 	range->fwnode = fwnode;
4022 	range->size = size;
4023 	range->hw_start = addr;
4024 	range->flags = LOGIC_PIO_CPU_MMIO;
4025 
4026 	ret = logic_pio_register_range(range);
4027 	if (ret)
4028 		kfree(range);
4029 
4030 	/* Ignore duplicates due to deferred probing */
4031 	if (ret == -EEXIST)
4032 		ret = 0;
4033 #endif
4034 
4035 	return ret;
4036 }
4037 
4038 phys_addr_t pci_pio_to_address(unsigned long pio)
4039 {
4040 	phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
4041 
4042 #ifdef PCI_IOBASE
4043 	if (pio >= MMIO_UPPER_LIMIT)
4044 		return address;
4045 
4046 	address = logic_pio_to_hwaddr(pio);
4047 #endif
4048 
4049 	return address;
4050 }
4051 EXPORT_SYMBOL_GPL(pci_pio_to_address);
4052 
4053 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4054 {
4055 #ifdef PCI_IOBASE
4056 	return logic_pio_trans_cpuaddr(address);
4057 #else
4058 	if (address > IO_SPACE_LIMIT)
4059 		return (unsigned long)-1;
4060 
4061 	return (unsigned long) address;
4062 #endif
4063 }
4064 
4065 /**
4066  * pci_remap_iospace - Remap the memory mapped I/O space
4067  * @res: Resource describing the I/O space
4068  * @phys_addr: physical address of range to be mapped
4069  *
4070  * Remap the memory mapped I/O space described by the @res and the CPU
4071  * physical address @phys_addr into virtual address space.  Only
4072  * architectures that have memory mapped IO functions defined (and the
4073  * PCI_IOBASE value defined) should call this function.
4074  */
4075 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4076 {
4077 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4078 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4079 
4080 	if (!(res->flags & IORESOURCE_IO))
4081 		return -EINVAL;
4082 
4083 	if (res->end > IO_SPACE_LIMIT)
4084 		return -EINVAL;
4085 
4086 	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4087 				  pgprot_device(PAGE_KERNEL));
4088 #else
4089 	/*
4090 	 * This architecture does not have memory mapped I/O space,
4091 	 * so this function should never be called
4092 	 */
4093 	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4094 	return -ENODEV;
4095 #endif
4096 }
4097 EXPORT_SYMBOL(pci_remap_iospace);
4098 
4099 /**
4100  * pci_unmap_iospace - Unmap the memory mapped I/O space
4101  * @res: resource to be unmapped
4102  *
4103  * Unmap the CPU virtual address @res from virtual address space.  Only
4104  * architectures that have memory mapped IO functions defined (and the
4105  * PCI_IOBASE value defined) should call this function.
4106  */
4107 void pci_unmap_iospace(struct resource *res)
4108 {
4109 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4110 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4111 
4112 	unmap_kernel_range(vaddr, resource_size(res));
4113 #endif
4114 }
4115 EXPORT_SYMBOL(pci_unmap_iospace);
4116 
4117 static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4118 {
4119 	struct resource **res = ptr;
4120 
4121 	pci_unmap_iospace(*res);
4122 }
4123 
4124 /**
4125  * devm_pci_remap_iospace - Managed pci_remap_iospace()
4126  * @dev: Generic device to remap IO address for
4127  * @res: Resource describing the I/O space
4128  * @phys_addr: physical address of range to be mapped
4129  *
4130  * Managed pci_remap_iospace().  Map is automatically unmapped on driver
4131  * detach.
4132  */
4133 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4134 			   phys_addr_t phys_addr)
4135 {
4136 	const struct resource **ptr;
4137 	int error;
4138 
4139 	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4140 	if (!ptr)
4141 		return -ENOMEM;
4142 
4143 	error = pci_remap_iospace(res, phys_addr);
4144 	if (error) {
4145 		devres_free(ptr);
4146 	} else	{
4147 		*ptr = res;
4148 		devres_add(dev, ptr);
4149 	}
4150 
4151 	return error;
4152 }
4153 EXPORT_SYMBOL(devm_pci_remap_iospace);
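
/*
 * Usage sketch (illustrative; the "io_res" resource and physical base
 * are hypothetical): a host bridge driver maps its I/O window once at
 * probe time and relies on the devres action for teardown on detach.
 *
 *	ret = devm_pci_remap_iospace(dev, io_res, io_window_phys_base);
 *	if (ret)
 *		dev_warn(dev, "error %d: no PCI I/O space available\n", ret);
 */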
4154 
4155 /**
4156  * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4157  * @dev: Generic device to remap IO address for
4158  * @offset: Resource address to map
4159  * @size: Size of map
4160  *
4161  * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
4162  * detach.
4163  */
4164 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4165 				      resource_size_t offset,
4166 				      resource_size_t size)
4167 {
4168 	void __iomem **ptr, *addr;
4169 
4170 	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4171 	if (!ptr)
4172 		return NULL;
4173 
4174 	addr = pci_remap_cfgspace(offset, size);
4175 	if (addr) {
4176 		*ptr = addr;
4177 		devres_add(dev, ptr);
4178 	} else
4179 		devres_free(ptr);
4180 
4181 	return addr;
4182 }
4183 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4184 
4185 /**
4186  * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4187  * @dev: generic device to handle the resource for
4188  * @res: configuration space resource to be handled
4189  *
4190  * Checks that a resource is a valid memory region, requests the memory
4191  * region and ioremaps with pci_remap_cfgspace() API that ensures the
4192  * proper PCI configuration space memory attributes are guaranteed.
4193  *
4194  * All operations are managed and will be undone on driver detach.
4195  *
4196  * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
4197  * on failure. Usage example::
4198  *
4199  *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4200  *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4201  *	if (IS_ERR(base))
4202  *		return PTR_ERR(base);
4203  */
4204 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4205 					  struct resource *res)
4206 {
4207 	resource_size_t size;
4208 	const char *name;
4209 	void __iomem *dest_ptr;
4210 
4211 	BUG_ON(!dev);
4212 
4213 	if (!res || resource_type(res) != IORESOURCE_MEM) {
4214 		dev_err(dev, "invalid resource\n");
4215 		return IOMEM_ERR_PTR(-EINVAL);
4216 	}
4217 
4218 	size = resource_size(res);
4219 	name = res->name ?: dev_name(dev);
4220 
4221 	if (!devm_request_mem_region(dev, res->start, size, name)) {
4222 		dev_err(dev, "can't request region for resource %pR\n", res);
4223 		return IOMEM_ERR_PTR(-EBUSY);
4224 	}
4225 
4226 	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4227 	if (!dest_ptr) {
4228 		dev_err(dev, "ioremap failed for resource %pR\n", res);
4229 		devm_release_mem_region(dev, res->start, size);
4230 		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4231 	}
4232 
4233 	return dest_ptr;
4234 }
4235 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4236 
4237 static void __pci_set_master(struct pci_dev *dev, bool enable)
4238 {
4239 	u16 old_cmd, cmd;
4240 
4241 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4242 	if (enable)
4243 		cmd = old_cmd | PCI_COMMAND_MASTER;
4244 	else
4245 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
4246 	if (cmd != old_cmd) {
4247 		pci_dbg(dev, "%s bus mastering\n",
4248 			enable ? "enabling" : "disabling");
4249 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4250 	}
4251 	dev->is_busmaster = enable;
4252 }
4253 
4254 /**
4255  * pcibios_setup - process "pci=" kernel boot arguments
4256  * @str: string used to pass in "pci=" kernel boot arguments
4257  *
4258  * Process kernel boot arguments.  This is the default implementation.
4259  * Architecture specific implementations can override this as necessary.
4260  */
4261 char * __weak __init pcibios_setup(char *str)
4262 {
4263 	return str;
4264 }
4265 
4266 /**
4267  * pcibios_set_master - enable PCI bus-mastering for device dev
4268  * @dev: the PCI device to enable
4269  *
4270  * Enables PCI bus-mastering for the device.  This is the default
4271  * implementation.  Architecture specific implementations can override
4272  * this if necessary.
4273  */
4274 void __weak pcibios_set_master(struct pci_dev *dev)
4275 {
4276 	u8 lat;
4277 
4278 	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4279 	if (pci_is_pcie(dev))
4280 		return;
4281 
4282 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4283 	if (lat < 16)
4284 		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4285 	else if (lat > pcibios_max_latency)
4286 		lat = pcibios_max_latency;
4287 	else
4288 		return;
4289 
4290 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4291 }
4292 
4293 /**
4294  * pci_set_master - enables bus-mastering for device dev
4295  * @dev: the PCI device to enable
4296  *
4297  * Enables bus-mastering on the device and calls pcibios_set_master()
4298  * to do the needed arch specific settings.
4299  */
4300 void pci_set_master(struct pci_dev *dev)
4301 {
4302 	__pci_set_master(dev, true);
4303 	pcibios_set_master(dev);
4304 }
4305 EXPORT_SYMBOL(pci_set_master);
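
/*
 * Usage sketch (illustrative): drivers that perform DMA enable the
 * device first and then turn on bus mastering before starting any
 * transfers.
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 */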
4306 
4307 /**
4308  * pci_clear_master - disables bus-mastering for device dev
4309  * @dev: the PCI device to disable
4310  */
4311 void pci_clear_master(struct pci_dev *dev)
4312 {
4313 	__pci_set_master(dev, false);
4314 }
4315 EXPORT_SYMBOL(pci_clear_master);
4316 
4317 /**
4318  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4319  * @dev: the PCI device for which MWI is to be enabled
4320  *
4321  * Helper function for pci_set_mwi.
4322  * Originally copied from drivers/net/acenic.c.
4323  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4324  *
4325  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4326  */
4327 int pci_set_cacheline_size(struct pci_dev *dev)
4328 {
4329 	u8 cacheline_size;
4330 
4331 	if (!pci_cache_line_size)
4332 		return -EINVAL;
4333 
4334 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4335 	   equal to or a multiple of the right value. */
4336 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4337 	if (cacheline_size >= pci_cache_line_size &&
4338 	    (cacheline_size % pci_cache_line_size) == 0)
4339 		return 0;
4340 
4341 	/* Write the correct value. */
4342 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4343 	/* Read it back. */
4344 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4345 	if (cacheline_size == pci_cache_line_size)
4346 		return 0;
4347 
4348 	pci_info(dev, "cache line size of %d is not supported\n",
4349 		   pci_cache_line_size << 2);
4350 
4351 	return -EINVAL;
4352 }
4353 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4354 
4355 /**
4356  * pci_set_mwi - enables memory-write-invalidate PCI transaction
4357  * @dev: the PCI device for which MWI is enabled
4358  *
4359  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4360  *
4361  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4362  */
4363 int pci_set_mwi(struct pci_dev *dev)
4364 {
4365 #ifdef PCI_DISABLE_MWI
4366 	return 0;
4367 #else
4368 	int rc;
4369 	u16 cmd;
4370 
4371 	rc = pci_set_cacheline_size(dev);
4372 	if (rc)
4373 		return rc;
4374 
4375 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4376 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4377 		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4378 		cmd |= PCI_COMMAND_INVALIDATE;
4379 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4380 	}
4381 	return 0;
4382 #endif
4383 }
4384 EXPORT_SYMBOL(pci_set_mwi);
4385 
4386 /**
4387  * pcim_set_mwi - a device-managed pci_set_mwi()
4388  * @dev: the PCI device for which MWI is enabled
4389  *
4390  * Managed pci_set_mwi().
4391  *
4392  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4393  */
4394 int pcim_set_mwi(struct pci_dev *dev)
4395 {
4396 	struct pci_devres *dr;
4397 
4398 	dr = find_pci_dr(dev);
4399 	if (!dr)
4400 		return -ENOMEM;
4401 
4402 	dr->mwi = 1;
4403 	return pci_set_mwi(dev);
4404 }
4405 EXPORT_SYMBOL(pcim_set_mwi);
4406 
4407 /**
4408  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4409  * @dev: the PCI device for which MWI is enabled
4410  *
4411  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4412  * Callers are not required to check the return value.
4413  *
4414  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4415  */
4416 int pci_try_set_mwi(struct pci_dev *dev)
4417 {
4418 #ifdef PCI_DISABLE_MWI
4419 	return 0;
4420 #else
4421 	return pci_set_mwi(dev);
4422 #endif
4423 }
4424 EXPORT_SYMBOL(pci_try_set_mwi);
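
/*
 * Usage sketch (illustrative): MWI is a performance hint, so callers
 * typically use the _try_ variant and deliberately ignore the result:
 *
 *	pci_try_set_mwi(pdev);
 */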
4425 
4426 /**
4427  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4428  * @dev: the PCI device to disable
4429  *
4430  * Disables PCI Memory-Write-Invalidate transaction on the device
4431  */
4432 void pci_clear_mwi(struct pci_dev *dev)
4433 {
4434 #ifndef PCI_DISABLE_MWI
4435 	u16 cmd;
4436 
4437 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4438 	if (cmd & PCI_COMMAND_INVALIDATE) {
4439 		cmd &= ~PCI_COMMAND_INVALIDATE;
4440 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4441 	}
4442 #endif
4443 }
4444 EXPORT_SYMBOL(pci_clear_mwi);
4445 
4446 /**
4447  * pci_intx - enables/disables PCI INTx for device dev
4448  * @pdev: the PCI device to operate on
4449  * @enable: boolean: whether to enable or disable PCI INTx
4450  *
4451  * Enables/disables PCI INTx for device @pdev
4452  */
4453 void pci_intx(struct pci_dev *pdev, int enable)
4454 {
4455 	u16 pci_command, new;
4456 
4457 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4458 
4459 	if (enable)
4460 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4461 	else
4462 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
4463 
4464 	if (new != pci_command) {
4465 		struct pci_devres *dr;
4466 
4467 		pci_write_config_word(pdev, PCI_COMMAND, new);
4468 
4469 		dr = find_pci_dr(pdev);
4470 		if (dr && !dr->restore_intx) {
4471 			dr->restore_intx = 1;
4472 			dr->orig_intx = !enable;
4473 		}
4474 	}
4475 }
4476 EXPORT_SYMBOL_GPL(pci_intx);
4477 
4478 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4479 {
4480 	struct pci_bus *bus = dev->bus;
4481 	bool mask_updated = true;
4482 	u32 cmd_status_dword;
4483 	u16 origcmd, newcmd;
4484 	unsigned long flags;
4485 	bool irq_pending;
4486 
4487 	/*
4488 	 * We do a single dword read to retrieve both command and status.
4489 	 * Document assumptions that make this possible.
4490 	 */
4491 	BUILD_BUG_ON(PCI_COMMAND % 4);
4492 	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4493 
4494 	raw_spin_lock_irqsave(&pci_lock, flags);
4495 
4496 	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4497 
4498 	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4499 
4500 	/*
4501 	 * Check interrupt status register to see whether our device
4502 	 * triggered the interrupt (when masking) or the next IRQ is
4503 	 * already pending (when unmasking).
4504 	 */
4505 	if (mask != irq_pending) {
4506 		mask_updated = false;
4507 		goto done;
4508 	}
4509 
4510 	origcmd = cmd_status_dword;
4511 	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4512 	if (mask)
4513 		newcmd |= PCI_COMMAND_INTX_DISABLE;
4514 	if (newcmd != origcmd)
4515 		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4516 
4517 done:
4518 	raw_spin_unlock_irqrestore(&pci_lock, flags);
4519 
4520 	return mask_updated;
4521 }
4522 
4523 /**
4524  * pci_check_and_mask_intx - mask INTx on pending interrupt
4525  * @dev: the PCI device to operate on
4526  *
4527  * Check if the device dev has its INTx line asserted, mask it and return
4528  * true in that case. False is returned if no interrupt was pending.
4529  */
4530 bool pci_check_and_mask_intx(struct pci_dev *dev)
4531 {
4532 	return pci_check_and_set_intx_mask(dev, true);
4533 }
4534 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4535 
4536 /**
4537  * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4538  * @dev: the PCI device to operate on
4539  *
4540  * Check if the device dev has its INTx line asserted, unmask it if not and
4541  * return true. False is returned and the mask remains active if there was
4542  * still an interrupt pending.
4543  */
4544 bool pci_check_and_unmask_intx(struct pci_dev *dev)
4545 {
4546 	return pci_check_and_set_intx_mask(dev, false);
4547 }
4548 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
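
/*
 * Usage sketch (illustrative; the mydrv names are hypothetical): a hard
 * IRQ handler for a shared, level-triggered INTx line can mask on entry
 * and defer the real work to a thread, which unmasks via
 * pci_check_and_unmask_intx() once the device has been serviced.
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct mydrv *priv = data;
 *
 *		if (!pci_check_and_mask_intx(priv->pdev))
 *			return IRQ_NONE;
 *
 *		return IRQ_WAKE_THREAD;
 *	}
 */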
4549 
4550 /**
4551  * pci_wait_for_pending_transaction - wait for pending transaction
4552  * @dev: the PCI device to operate on
4553  *
4554  * Return 0 if transaction is pending, 1 otherwise.
4555  */
4556 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4557 {
4558 	if (!pci_is_pcie(dev))
4559 		return 1;
4560 
4561 	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4562 				    PCI_EXP_DEVSTA_TRPND);
4563 }
4564 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4565 
4566 /**
4567  * pcie_has_flr - check if a device supports function level resets
4568  * @dev: device to check
4569  *
4570  * Returns true if the device advertises support for PCIe function level
4571  * resets.
4572  */
4573 bool pcie_has_flr(struct pci_dev *dev)
4574 {
4575 	u32 cap;
4576 
4577 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4578 		return false;
4579 
4580 	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4581 	return cap & PCI_EXP_DEVCAP_FLR;
4582 }
4583 EXPORT_SYMBOL_GPL(pcie_has_flr);
4584 
4585 /**
4586  * pcie_flr - initiate a PCIe function level reset
4587  * @dev: device to reset
4588  *
4589  * Initiate a function level reset on @dev.  The caller should ensure the
4590  * device supports FLR before calling this function, e.g. by using the
4591  * pcie_has_flr() helper.
4592  */
4593 int pcie_flr(struct pci_dev *dev)
4594 {
4595 	if (!pci_wait_for_pending_transaction(dev))
4596 		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4597 
4598 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4599 
4600 	if (dev->imm_ready)
4601 		return 0;
4602 
4603 	/*
4604 	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4605 	 * 100ms, but may silently discard requests while the FLR is in
4606 	 * progress.  Wait 100ms before trying to access the device.
4607 	 */
4608 	msleep(100);
4609 
4610 	return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4611 }
4612 EXPORT_SYMBOL_GPL(pcie_flr);
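
/*
 * Usage sketch (illustrative): per the kernel-doc above, callers probe
 * for FLR support before initiating the reset.
 *
 *	if (pcie_has_flr(pdev)) {
 *		rc = pcie_flr(pdev);
 *		if (rc)
 *			pci_err(pdev, "FLR failed: %d\n", rc);
 *	}
 */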
4613 
4614 static int pci_af_flr(struct pci_dev *dev, int probe)
4615 {
4616 	int pos;
4617 	u8 cap;
4618 
4619 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4620 	if (!pos)
4621 		return -ENOTTY;
4622 
4623 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4624 		return -ENOTTY;
4625 
4626 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4627 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4628 		return -ENOTTY;
4629 
4630 	if (probe)
4631 		return 0;
4632 
4633 	/*
4634 	 * Wait for Transaction Pending bit to clear.  A word-aligned test
4635 	 * is used, so we use the control offset rather than status and shift
4636 	 * the test bit to match.
4637 	 */
4638 	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4639 				 PCI_AF_STATUS_TP << 8))
4640 		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4641 
4642 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4643 
4644 	if (dev->imm_ready)
4645 		return 0;
4646 
4647 	/*
4648 	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4649 	 * updated 27 July 2006; a device must complete an FLR within
4650 	 * 100ms, but may silently discard requests while the FLR is in
4651 	 * progress.  Wait 100ms before trying to access the device.
4652 	 */
4653 	msleep(100);
4654 
4655 	return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4656 }
4657 
4658 /**
4659  * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4660  * @dev: Device to reset.
4661  * @probe: If set, only check if the device can be reset this way.
4662  *
4663  * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4664  * unset, it will be reinitialized internally when going from PCI_D3hot to
4665  * PCI_D0.  If that's the case and the device is not in a low-power state
4666  * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4667  *
4668  * NOTE: This causes the caller to sleep for twice the device power transition
4669  * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4670  * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4671  * Moreover, only devices in D0 can be reset by this function.
4672  */
4673 static int pci_pm_reset(struct pci_dev *dev, int probe)
4674 {
4675 	u16 csr;
4676 
4677 	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4678 		return -ENOTTY;
4679 
4680 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4681 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4682 		return -ENOTTY;
4683 
4684 	if (probe)
4685 		return 0;
4686 
4687 	if (dev->current_state != PCI_D0)
4688 		return -EINVAL;
4689 
4690 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4691 	csr |= PCI_D3hot;
4692 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4693 	pci_dev_d3_sleep(dev);
4694 
4695 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4696 	csr |= PCI_D0;
4697 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4698 	pci_dev_d3_sleep(dev);
4699 
4700 	return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4701 }
4702 
4703 /**
4704  * pcie_wait_for_link_delay - Wait until link is active or inactive
4705  * @pdev: Bridge device
4706  * @active: waiting for active or inactive?
4707  * @delay: Delay to wait after link has become active (in ms)
4708  *
4709  * Use this to wait till link becomes active or inactive.
4710  */
4711 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4712 				     int delay)
4713 {
4714 	int timeout = 1000;
4715 	bool ret;
4716 	u16 lnk_status;
4717 
4718 	/*
4719 	 * Some controllers might not implement link active reporting. In this
4720 	 * case, we wait for 1000 ms + any delay requested by the caller.
4721 	 */
4722 	if (!pdev->link_active_reporting) {
4723 		msleep(timeout + delay);
4724 		return true;
4725 	}
4726 
4727 	/*
4728 	 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
4729 	 * 20ms, after which we should expect the link to be active if the reset
4730 	 * was successful. If so, software must wait a minimum of 100ms before
4731 	 * sending configuration requests to devices downstream of this port.
4732 	 *
4733 	 * If the link fails to activate, either the device was physically
4734 	 * removed or the link is permanently failed.
4735 	 */
4736 	if (active)
4737 		msleep(20);
4738 	for (;;) {
4739 		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4740 		ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4741 		if (ret == active)
4742 			break;
4743 		if (timeout <= 0)
4744 			break;
4745 		msleep(10);
4746 		timeout -= 10;
4747 	}
4748 	if (active && ret)
4749 		msleep(delay);
4750 
4751 	return ret == active;
4752 }
4753 
4754 /**
4755  * pcie_wait_for_link - Wait until link is active or inactive
4756  * @pdev: Bridge device
4757  * @active: waiting for active or inactive?
4758  *
4759  * Use this to wait until the link becomes active or inactive.
4760  */
4761 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4762 {
4763 	return pcie_wait_for_link_delay(pdev, active, 100);
4764 }
4765 
4766 /*
4767  * Find maximum D3cold delay required by all the devices on the bus.  The
4768  * spec says 100 ms, but firmware can lower it and we allow drivers to
4769  * increase it as well.
4770  *
4771  * Called with @pci_bus_sem locked for reading.
4772  */
4773 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4774 {
4775 	const struct pci_dev *pdev;
4776 	int min_delay = 100;
4777 	int max_delay = 0;
4778 
4779 	list_for_each_entry(pdev, &bus->devices, bus_list) {
4780 		if (pdev->d3cold_delay < min_delay)
4781 			min_delay = pdev->d3cold_delay;
4782 		if (pdev->d3cold_delay > max_delay)
4783 			max_delay = pdev->d3cold_delay;
4784 	}
4785 
4786 	return max(min_delay, max_delay);
4787 }
4788 
4789 /**
4790  * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4791  * @dev: PCI bridge
4792  * @reset_type: reset type in human-readable form
4793  * @timeout: maximum time to wait for devices on secondary bus (milliseconds)
4794  *
4795  * Handle necessary delays before access to the devices on the secondary
4796  * side of the bridge is permitted after a D3cold to D0 transition or
4797  * Conventional Reset.
4798  *
4799  * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4800  * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4801  * 4.3.2.
4802  *
4803  * Return 0 on success or -ENOTTY if the first device on the secondary bus
4804  * failed to become accessible.
4805  */
4806 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
4807 				      int timeout)
4808 {
4809 	struct pci_dev *child;
4810 	int delay;
4811 
4812 	if (pci_dev_is_disconnected(dev))
4813 		return 0;
4814 
4815 	if (!pci_is_bridge(dev))
4816 		return 0;
4817 
4818 	down_read(&pci_bus_sem);
4819 
4820 	/*
4821 	 * We only deal with devices that are currently present on the bus.
4822 	 * For any hot-added devices the access delay is handled in pciehp
4823 	 * board_added(). In case of ACPI hotplug the firmware is expected
4824 	 * to configure the devices before OS is notified.
4825 	 */
4826 	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4827 		up_read(&pci_bus_sem);
4828 		return 0;
4829 	}
4830 
4831 	/* Take d3cold_delay requirements into account */
4832 	delay = pci_bus_max_d3cold_delay(dev->subordinate);
4833 	if (!delay) {
4834 		up_read(&pci_bus_sem);
4835 		return 0;
4836 	}
4837 
4838 	child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4839 				 bus_list);
4840 	up_read(&pci_bus_sem);
4841 
4842 	/*
4843 	 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
4844 	 * accessing the device after reset (that is, 1000 ms + 100 ms).
4845 	 */
4846 	if (!pci_is_pcie(dev)) {
4847 		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4848 		msleep(1000 + delay);
4849 		return 0;
4850 	}
4851 
4852 	/*
4853 	 * PCIe downstream and root ports that do not support speeds greater
4854 	 * than 5 GT/s need to wait a minimum of 100 ms. For higher speeds
4855 	 * (gen3+) we first need to wait for the data link layer to become
4856 	 * active.
4857 	 *
4858 	 * However, 100 ms is the minimum and the PCIe spec says the
4859 	 * software must allow at least 1s before it can determine that the
4860 	 * device that did not respond is a broken device. There is
4861 	 * evidence that 100 ms is not always enough; for example, a certain
4862 	 * Titan Ridge xHCI controller does not always respond to
4863 	 * configuration requests if we only wait for 100 ms (see
4864 	 * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
4865 	 *
4866 	 * Therefore we wait for 100 ms and check for the device presence
4867 	 * until the timeout expires.
4868 	 */
4869 	if (!pcie_downstream_port(dev))
4870 		return 0;
4871 
4872 	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4873 		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4874 		msleep(delay);
4875 	} else {
4876 		pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4877 			delay);
4878 		if (!pcie_wait_for_link_delay(dev, true, delay)) {
4879 			/* Did not train, no need to wait any further */
4880 			pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
4881 			return -ENOTTY;
4882 		}
4883 	}
4884 
4885 	return pci_dev_wait(child, reset_type, timeout - delay);
4886 }
4887 
4888 void pci_reset_secondary_bus(struct pci_dev *dev)
4889 {
4890 	u16 ctrl;
4891 
4892 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4893 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4894 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4895 
4896 	/*
4897 	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
4898 	 * this to 2ms to ensure that we meet the minimum requirement.
4899 	 */
4900 	msleep(2);
4901 
4902 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4903 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4904 }
4905 
4906 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4907 {
4908 	pci_reset_secondary_bus(dev);
4909 }
4910 
4911 /**
4912  * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4913  * @dev: Bridge device
4914  *
4915  * Use the bridge control register to assert reset on the secondary bus.
4916  * Devices on the secondary bus are left in power-on state.
4917  */
4918 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4919 {
4920 	pcibios_reset_secondary_bus(dev);
4921 
4922 	return pci_bridge_wait_for_secondary_bus(dev, "bus reset",
4923 						 PCIE_RESET_READY_POLL_MS);
4924 }
4925 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4926 
4927 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4928 {
4929 	struct pci_dev *pdev;
4930 
4931 	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4932 	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4933 		return -ENOTTY;
4934 
4935 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4936 		if (pdev != dev)
4937 			return -ENOTTY;
4938 
4939 	if (probe)
4940 		return 0;
4941 
4942 	return pci_bridge_secondary_bus_reset(dev->bus->self);
4943 }
4944 
4945 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4946 {
4947 	int rc = -ENOTTY;
4948 
4949 	if (!hotplug || !try_module_get(hotplug->owner))
4950 		return rc;
4951 
4952 	if (hotplug->ops->reset_slot)
4953 		rc = hotplug->ops->reset_slot(hotplug, probe);
4954 
4955 	module_put(hotplug->owner);
4956 
4957 	return rc;
4958 }
4959 
4960 static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4961 {
4962 	if (dev->multifunction || dev->subordinate || !dev->slot ||
4963 	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4964 		return -ENOTTY;
4965 
4966 	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4967 }
4968 
4969 static void pci_dev_lock(struct pci_dev *dev)
4970 {
4971 	/* block PM suspend, driver probe, etc. */
4972 	device_lock(&dev->dev);
4973 	pci_cfg_access_lock(dev);
4974 }
4975 
4976 /* Return 1 on successful lock, 0 on contention */
4977 static int pci_dev_trylock(struct pci_dev *dev)
4978 {
4979 	if (device_trylock(&dev->dev)) {
4980 		if (pci_cfg_access_trylock(dev))
4981 			return 1;
4982 		device_unlock(&dev->dev);
4983 	}
4984 
4985 	return 0;
4986 }
4987 
4988 static void pci_dev_unlock(struct pci_dev *dev)
4989 {
4990 	pci_cfg_access_unlock(dev);
4991 	device_unlock(&dev->dev);
4992 }
4993 
4994 static void pci_dev_save_and_disable(struct pci_dev *dev)
4995 {
4996 	const struct pci_error_handlers *err_handler =
4997 			dev->driver ? dev->driver->err_handler : NULL;
4998 
4999 	/*
5000 	 * dev->driver->err_handler->reset_prepare() is protected against
5001 	 * races with ->remove() by the device lock, which must be held by
5002 	 * the caller.
5003 	 */
5004 	if (err_handler && err_handler->reset_prepare)
5005 		err_handler->reset_prepare(dev);
5006 
5007 	/*
5008 	 * Wake-up device prior to save.  PM registers default to D0 after
5009 	 * reset and a simple register restore doesn't reliably return
5010 	 * to a non-D0 state anyway.
5011 	 */
5012 	pci_set_power_state(dev, PCI_D0);
5013 
5014 	pci_save_state(dev);
5015 	/*
5016 	 * Disable the device by clearing the Command register, except for
5017 	 * INTx-disable which is set.  This not only disables MMIO and I/O port
5018 	 * BARs, but also prevents the device from being Bus Master, preventing
5019 	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
5020 	 * compliant devices, INTx-disable prevents legacy interrupts.
5021 	 */
5022 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5023 }
5024 
5025 static void pci_dev_restore(struct pci_dev *dev)
5026 {
5027 	const struct pci_error_handlers *err_handler =
5028 			dev->driver ? dev->driver->err_handler : NULL;
5029 
5030 	pci_restore_state(dev);
5031 
5032 	/*
5033 	 * dev->driver->err_handler->reset_done() is protected against
5034 	 * races with ->remove() by the device lock, which must be held by
5035 	 * the caller.
5036 	 */
5037 	if (err_handler && err_handler->reset_done)
5038 		err_handler->reset_done(dev);
5039 }
5040 
5041 /**
5042  * __pci_reset_function_locked - reset a PCI device function while holding
5043  * the @dev mutex lock.
5044  * @dev: PCI device to reset
5045  *
5046  * Some devices allow an individual function to be reset without affecting
5047  * other functions in the same device.  The PCI device must be responsive
5048  * to PCI config space in order to use this function.
5049  *
5050  * The device function is presumed to be unused and the caller is holding
5051  * the device mutex lock when this function is called.
5052  *
5053  * Resetting the device will make the contents of PCI configuration space
5054  * random, so any caller of this must be prepared to reinitialise the
5055  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5056  * etc.
5057  *
5058  * Returns 0 if the device function was successfully reset or negative if the
5059  * device doesn't support resetting a single function.
5060  */
5061 int __pci_reset_function_locked(struct pci_dev *dev)
5062 {
5063 	int rc;
5064 
5065 	might_sleep();
5066 
5067 	/*
5068 	 * A reset method returns -ENOTTY if it doesn't support this device
5069 	 * and we should try the next method.
5070 	 *
5071 	 * If it returns 0 (success), we're finished.  If it returns any
5072 	 * other error, we're also finished: this indicates that further
5073 	 * reset mechanisms might be broken on the device.
5074 	 */
5075 	rc = pci_dev_specific_reset(dev, 0);
5076 	if (rc != -ENOTTY)
5077 		return rc;
5078 	if (pcie_has_flr(dev)) {
5079 		rc = pcie_flr(dev);
5080 		if (rc != -ENOTTY)
5081 			return rc;
5082 	}
5083 	rc = pci_af_flr(dev, 0);
5084 	if (rc != -ENOTTY)
5085 		return rc;
5086 	rc = pci_pm_reset(dev, 0);
5087 	if (rc != -ENOTTY)
5088 		return rc;
5089 	rc = pci_dev_reset_slot_function(dev, 0);
5090 	if (rc != -ENOTTY)
5091 		return rc;
5092 	return pci_parent_bus_reset(dev, 0);
5093 }
5094 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
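/*
 * Usage sketch (illustrative, not a caller in this file): a caller that
 * already holds the device mutex can invoke the reset directly, but since
 * config space is undefined afterwards it must reinitialize the device
 * itself; my_dev_reinit() is a hypothetical driver helper.
 *
 *	device_lock(&pdev->dev);
 *	rc = __pci_reset_function_locked(pdev);
 *	device_unlock(&pdev->dev);
 *	if (!rc)
 *		rc = my_dev_reinit(pdev);	(restore BARs, MSI, etc.)
 */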
5095 
5096 /**
5097  * pci_probe_reset_function - check whether the device can be safely reset
5098  * @dev: PCI device to reset
5099  *
5100  * Some devices allow an individual function to be reset without affecting
5101  * other functions in the same device.  The PCI device must be responsive
5102  * to PCI config space in order to use this function.
5103  *
5104  * Returns 0 if the device function can be reset or negative if the
5105  * device doesn't support resetting a single function.
5106  */
5107 int pci_probe_reset_function(struct pci_dev *dev)
5108 {
5109 	int rc;
5110 
5111 	might_sleep();
5112 
5113 	rc = pci_dev_specific_reset(dev, 1);
5114 	if (rc != -ENOTTY)
5115 		return rc;
5116 	if (pcie_has_flr(dev))
5117 		return 0;
5118 	rc = pci_af_flr(dev, 1);
5119 	if (rc != -ENOTTY)
5120 		return rc;
5121 	rc = pci_pm_reset(dev, 1);
5122 	if (rc != -ENOTTY)
5123 		return rc;
5124 	rc = pci_dev_reset_slot_function(dev, 1);
5125 	if (rc != -ENOTTY)
5126 		return rc;
5127 
5128 	return pci_parent_bus_reset(dev, 1);
5129 }
5130 
5131 /**
5132  * pci_reset_function - quiesce and reset a PCI device function
5133  * @dev: PCI device to reset
5134  *
5135  * Some devices allow an individual function to be reset without affecting
5136  * other functions in the same device.  The PCI device must be responsive
5137  * to PCI config space in order to use this function.
5138  *
5139  * This function does not just reset the PCI portion of a device, but
5140  * clears all the state associated with the device.  This function differs
5141  * from __pci_reset_function_locked() in that it saves and restores device state
5142  * over the reset and takes the PCI device lock.
5143  *
5144  * Returns 0 if the device function was successfully reset or negative if the
5145  * device doesn't support resetting a single function.
5146  */
5147 int pci_reset_function(struct pci_dev *dev)
5148 {
5149 	int rc;
5150 
5151 	if (!dev->reset_fn)
5152 		return -ENOTTY;
5153 
5154 	pci_dev_lock(dev);
5155 	pci_dev_save_and_disable(dev);
5156 
5157 	rc = __pci_reset_function_locked(dev);
5158 
5159 	pci_dev_restore(dev);
5160 	pci_dev_unlock(dev);
5161 
5162 	return rc;
5163 }
5164 EXPORT_SYMBOL_GPL(pci_reset_function);
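/*
 * Usage sketch (illustrative): a typical caller only checks the result,
 * since locking and config-space save/restore are handled internally:
 *
 *	if (pci_reset_function(pdev))
 *		pci_warn(pdev, "function reset failed\n");
 */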
5165 
5166 /**
5167  * pci_reset_function_locked - quiesce and reset a PCI device function
5168  * @dev: PCI device to reset
5169  *
5170  * Some devices allow an individual function to be reset without affecting
5171  * other functions in the same device.  The PCI device must be responsive
5172  * to PCI config space in order to use this function.
5173  *
5174  * This function does not just reset the PCI portion of a device, but
5175  * clears all the state associated with the device.  This function differs
5176  * from __pci_reset_function_locked() in that it saves and restores device state
5177  * over the reset.  It also differs from pci_reset_function() in that it
5178  * requires the PCI device lock to be held.
5179  *
5180  * Returns 0 if the device function was successfully reset or negative if the
5181  * device doesn't support resetting a single function.
5182  */
5183 int pci_reset_function_locked(struct pci_dev *dev)
5184 {
5185 	int rc;
5186 
5187 	if (!dev->reset_fn)
5188 		return -ENOTTY;
5189 
5190 	pci_dev_save_and_disable(dev);
5191 
5192 	rc = __pci_reset_function_locked(dev);
5193 
5194 	pci_dev_restore(dev);
5195 
5196 	return rc;
5197 }
5198 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5199 
5200 /**
5201  * pci_try_reset_function - quiesce and reset a PCI device function
5202  * @dev: PCI device to reset
5203  *
5204  * Same as above, except return -EAGAIN if unable to lock device.
5205  */
5206 int pci_try_reset_function(struct pci_dev *dev)
5207 {
5208 	int rc;
5209 
5210 	if (!dev->reset_fn)
5211 		return -ENOTTY;
5212 
5213 	if (!pci_dev_trylock(dev))
5214 		return -EAGAIN;
5215 
5216 	pci_dev_save_and_disable(dev);
5217 	rc = __pci_reset_function_locked(dev);
5218 	pci_dev_restore(dev);
5219 	pci_dev_unlock(dev);
5220 
5221 	return rc;
5222 }
5223 EXPORT_SYMBOL_GPL(pci_try_reset_function);
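/*
 * Usage sketch (illustrative): unlike pci_reset_function(), this variant
 * does not block on the device lock, so a caller that must not sleep on
 * lock contention typically defers and retries on -EAGAIN:
 *
 *	rc = pci_try_reset_function(pdev);
 *	if (rc == -EAGAIN)
 *		...	(reschedule and try again later)
 */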
5224 
5225 /* Do any devices on or below this bus prevent a bus reset? */
5226 static bool pci_bus_resetable(struct pci_bus *bus)
5227 {
5228 	struct pci_dev *dev;
5229 
5230 
5231 	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5232 		return false;
5233 
5234 	list_for_each_entry(dev, &bus->devices, bus_list) {
5235 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5236 		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5237 			return false;
5238 	}
5239 
5240 	return true;
5241 }
5242 
5243 /* Lock devices from the top of the tree down */
5244 static void pci_bus_lock(struct pci_bus *bus)
5245 {
5246 	struct pci_dev *dev;
5247 
5248 	list_for_each_entry(dev, &bus->devices, bus_list) {
5249 		pci_dev_lock(dev);
5250 		if (dev->subordinate)
5251 			pci_bus_lock(dev->subordinate);
5252 	}
5253 }
5254 
5255 /* Unlock devices from the bottom of the tree up */
5256 static void pci_bus_unlock(struct pci_bus *bus)
5257 {
5258 	struct pci_dev *dev;
5259 
5260 	list_for_each_entry(dev, &bus->devices, bus_list) {
5261 		if (dev->subordinate)
5262 			pci_bus_unlock(dev->subordinate);
5263 		pci_dev_unlock(dev);
5264 	}
5265 }
5266 
5267 /* Return 1 on successful lock, 0 on contention */
5268 static int pci_bus_trylock(struct pci_bus *bus)
5269 {
5270 	struct pci_dev *dev;
5271 
5272 	list_for_each_entry(dev, &bus->devices, bus_list) {
5273 		if (!pci_dev_trylock(dev))
5274 			goto unlock;
5275 		if (dev->subordinate) {
5276 			if (!pci_bus_trylock(dev->subordinate)) {
5277 				pci_dev_unlock(dev);
5278 				goto unlock;
5279 			}
5280 		}
5281 	}
5282 	return 1;
5283 
5284 unlock:
5285 	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5286 		if (dev->subordinate)
5287 			pci_bus_unlock(dev->subordinate);
5288 		pci_dev_unlock(dev);
5289 	}
5290 	return 0;
5291 }
5292 
5293 /* Do any devices on or below this slot prevent a bus reset? */
5294 static bool pci_slot_resetable(struct pci_slot *slot)
5295 {
5296 	struct pci_dev *dev;
5297 
5298 	if (slot->bus->self &&
5299 	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5300 		return false;
5301 
5302 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5303 		if (!dev->slot || dev->slot != slot)
5304 			continue;
5305 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5306 		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5307 			return false;
5308 	}
5309 
5310 	return true;
5311 }
5312 
5313 /* Lock devices from the top of the tree down */
5314 static void pci_slot_lock(struct pci_slot *slot)
5315 {
5316 	struct pci_dev *dev;
5317 
5318 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5319 		if (!dev->slot || dev->slot != slot)
5320 			continue;
5321 		pci_dev_lock(dev);
5322 		if (dev->subordinate)
5323 			pci_bus_lock(dev->subordinate);
5324 	}
5325 }
5326 
5327 /* Unlock devices from the bottom of the tree up */
5328 static void pci_slot_unlock(struct pci_slot *slot)
5329 {
5330 	struct pci_dev *dev;
5331 
5332 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5333 		if (!dev->slot || dev->slot != slot)
5334 			continue;
5335 		if (dev->subordinate)
5336 			pci_bus_unlock(dev->subordinate);
5337 		pci_dev_unlock(dev);
5338 	}
5339 }
5340 
5341 /* Return 1 on successful lock, 0 on contention */
5342 static int pci_slot_trylock(struct pci_slot *slot)
5343 {
5344 	struct pci_dev *dev;
5345 
5346 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5347 		if (!dev->slot || dev->slot != slot)
5348 			continue;
5349 		if (!pci_dev_trylock(dev))
5350 			goto unlock;
5351 		if (dev->subordinate) {
5352 			if (!pci_bus_trylock(dev->subordinate)) {
5353 				pci_dev_unlock(dev);
5354 				goto unlock;
5355 			}
5356 		}
5357 	}
5358 	return 1;
5359 
5360 unlock:
5361 	list_for_each_entry_continue_reverse(dev,
5362 					     &slot->bus->devices, bus_list) {
5363 		if (!dev->slot || dev->slot != slot)
5364 			continue;
5365 		if (dev->subordinate)
5366 			pci_bus_unlock(dev->subordinate);
5367 		pci_dev_unlock(dev);
5368 	}
5369 	return 0;
5370 }
5371 
5372 /*
5373  * Save and disable devices from the top of the tree down while holding
5374  * the @dev mutex lock for the entire tree.
5375  */
5376 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5377 {
5378 	struct pci_dev *dev;
5379 
5380 	list_for_each_entry(dev, &bus->devices, bus_list) {
5381 		pci_dev_save_and_disable(dev);
5382 		if (dev->subordinate)
5383 			pci_bus_save_and_disable_locked(dev->subordinate);
5384 	}
5385 }
5386 
5387 /*
5388  * Restore devices from top of the tree down while holding @dev mutex lock
5389  * for the entire tree.  Parent bridges need to be restored before we can
5390  * get to subordinate devices.
5391  */
5392 static void pci_bus_restore_locked(struct pci_bus *bus)
5393 {
5394 	struct pci_dev *dev;
5395 
5396 	list_for_each_entry(dev, &bus->devices, bus_list) {
5397 		pci_dev_restore(dev);
5398 		if (dev->subordinate)
5399 			pci_bus_restore_locked(dev->subordinate);
5400 	}
5401 }
5402 
5403 /*
5404  * Save and disable devices from the top of the tree down while holding
5405  * the @dev mutex lock for the entire tree.
5406  */
5407 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5408 {
5409 	struct pci_dev *dev;
5410 
5411 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5412 		if (!dev->slot || dev->slot != slot)
5413 			continue;
5414 		pci_dev_save_and_disable(dev);
5415 		if (dev->subordinate)
5416 			pci_bus_save_and_disable_locked(dev->subordinate);
5417 	}
5418 }
5419 
5420 /*
5421  * Restore devices from top of the tree down while holding @dev mutex lock
5422  * for the entire tree.  Parent bridges need to be restored before we can
5423  * get to subordinate devices.
5424  */
5425 static void pci_slot_restore_locked(struct pci_slot *slot)
5426 {
5427 	struct pci_dev *dev;
5428 
5429 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5430 		if (!dev->slot || dev->slot != slot)
5431 			continue;
5432 		pci_dev_restore(dev);
5433 		if (dev->subordinate)
5434 			pci_bus_restore_locked(dev->subordinate);
5435 	}
5436 }
5437 
5438 static int pci_slot_reset(struct pci_slot *slot, int probe)
5439 {
5440 	int rc;
5441 
5442 	if (!slot || !pci_slot_resetable(slot))
5443 		return -ENOTTY;
5444 
5445 	if (!probe)
5446 		pci_slot_lock(slot);
5447 
5448 	might_sleep();
5449 
5450 	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5451 
5452 	if (!probe)
5453 		pci_slot_unlock(slot);
5454 
5455 	return rc;
5456 }
5457 
5458 /**
5459  * pci_probe_reset_slot - probe whether a PCI slot can be reset
5460  * @slot: PCI slot to probe
5461  *
5462  * Return 0 if slot can be reset, negative if a slot reset is not supported.
5463  */
5464 int pci_probe_reset_slot(struct pci_slot *slot)
5465 {
5466 	return pci_slot_reset(slot, 1);
5467 }
5468 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5469 
5470 /**
5471  * __pci_reset_slot - Try to reset a PCI slot
5472  * @slot: PCI slot to reset
5473  *
5474  * A PCI bus may host multiple slots, each slot may support a reset mechanism
5475  * independent of other slots.  For instance, some slots may support slot power
5476  * control.  In the case of a 1:1 bus-to-slot architecture, this function may
5477  * wrap the bus reset to avoid spurious slot-related events such as hotplug.
5478  * Generally a slot reset should be attempted before a bus reset.  All of the
5479  * functions of the slot and any subordinate buses behind the slot are reset
5480  * through this function.  PCI config space of all devices in the slot and
5481  * behind the slot is saved before and restored after reset.
5482  *
5483  * Same as above except return -EAGAIN if the slot cannot be locked
5484  */
5485 static int __pci_reset_slot(struct pci_slot *slot)
5486 {
5487 	int rc;
5488 
5489 	rc = pci_slot_reset(slot, 1);
5490 	if (rc)
5491 		return rc;
5492 
5493 	if (pci_slot_trylock(slot)) {
5494 		pci_slot_save_and_disable_locked(slot);
5495 		might_sleep();
5496 		rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5497 		pci_slot_restore_locked(slot);
5498 		pci_slot_unlock(slot);
5499 	} else
5500 		rc = -EAGAIN;
5501 
5502 	return rc;
5503 }
5504 
5505 static int pci_bus_reset(struct pci_bus *bus, int probe)
5506 {
5507 	int ret;
5508 
5509 	if (!bus->self || !pci_bus_resetable(bus))
5510 		return -ENOTTY;
5511 
5512 	if (probe)
5513 		return 0;
5514 
5515 	pci_bus_lock(bus);
5516 
5517 	might_sleep();
5518 
5519 	ret = pci_bridge_secondary_bus_reset(bus->self);
5520 
5521 	pci_bus_unlock(bus);
5522 
5523 	return ret;
5524 }
5525 
5526 /**
5527  * pci_bus_error_reset - reset the bridge's subordinate bus
5528  * @bridge: The parent device that connects to the bus to reset
5529  *
5530  * This function will first try to reset the slots on this bus if the method is
5531  * available. If slot reset fails or is not available, this will fall back to a
5532  * secondary bus reset.
5533  */
5534 int pci_bus_error_reset(struct pci_dev *bridge)
5535 {
5536 	struct pci_bus *bus = bridge->subordinate;
5537 	struct pci_slot *slot;
5538 
5539 	if (!bus)
5540 		return -ENOTTY;
5541 
5542 	mutex_lock(&pci_slot_mutex);
5543 	if (list_empty(&bus->slots))
5544 		goto bus_reset;
5545 
5546 	list_for_each_entry(slot, &bus->slots, list)
5547 		if (pci_probe_reset_slot(slot))
5548 			goto bus_reset;
5549 
5550 	list_for_each_entry(slot, &bus->slots, list)
5551 		if (pci_slot_reset(slot, 0))
5552 			goto bus_reset;
5553 
5554 	mutex_unlock(&pci_slot_mutex);
5555 	return 0;
5556 bus_reset:
5557 	mutex_unlock(&pci_slot_mutex);
5558 	return pci_bus_reset(bridge->subordinate, 0);
5559 }
5560 
5561 /**
5562  * pci_probe_reset_bus - probe whether a PCI bus can be reset
5563  * @bus: PCI bus to probe
5564  *
5565  * Return 0 if bus can be reset, negative if a bus reset is not supported.
5566  */
5567 int pci_probe_reset_bus(struct pci_bus *bus)
5568 {
5569 	return pci_bus_reset(bus, 1);
5570 }
5571 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5572 
5573 /**
5574  * __pci_reset_bus - Try to reset a PCI bus
5575  * @bus: top level PCI bus to reset
5576  *
5577  * Same as above except return -EAGAIN if the bus cannot be locked
5578  */
5579 static int __pci_reset_bus(struct pci_bus *bus)
5580 {
5581 	int rc;
5582 
5583 	rc = pci_bus_reset(bus, 1);
5584 	if (rc)
5585 		return rc;
5586 
5587 	if (pci_bus_trylock(bus)) {
5588 		pci_bus_save_and_disable_locked(bus);
5589 		might_sleep();
5590 		rc = pci_bridge_secondary_bus_reset(bus->self);
5591 		pci_bus_restore_locked(bus);
5592 		pci_bus_unlock(bus);
5593 	} else
5594 		rc = -EAGAIN;
5595 
5596 	return rc;
5597 }
5598 
5599 /**
5600  * pci_reset_bus - Try to reset a PCI bus
5601  * @pdev: top level PCI device to reset via slot/bus
5602  *
5603  * Same as above except return -EAGAIN if the bus cannot be locked
5604  */
5605 int pci_reset_bus(struct pci_dev *pdev)
5606 {
5607 	return (!pci_probe_reset_slot(pdev->slot)) ?
5608 	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5609 }
5610 EXPORT_SYMBOL_GPL(pci_reset_bus);
5611 
5612 /**
5613  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5614  * @dev: PCI device to query
5615  *
5616  * Returns mmrbc: maximum designed memory read count in bytes or
5617  * appropriate error value.
5618  */
5619 int pcix_get_max_mmrbc(struct pci_dev *dev)
5620 {
5621 	int cap;
5622 	u32 stat;
5623 
5624 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5625 	if (!cap)
5626 		return -EINVAL;
5627 
5628 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5629 		return -EINVAL;
5630 
5631 	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5632 }
5633 EXPORT_SYMBOL(pcix_get_max_mmrbc);
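/*
 * Worked example of the decode above: PCI_X_STATUS bits 23:21 hold an
 * exponent e and the designed maximum is 512 << e, so a field value of
 * e = 2 decodes to 512 << 2 = 2048 bytes.
 */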
5634 
5635 /**
5636  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5637  * @dev: PCI device to query
5638  *
5639  * Returns mmrbc: maximum memory read count in bytes or appropriate error
5640  * value.
5641  */
5642 int pcix_get_mmrbc(struct pci_dev *dev)
5643 {
5644 	int cap;
5645 	u16 cmd;
5646 
5647 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5648 	if (!cap)
5649 		return -EINVAL;
5650 
5651 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5652 		return -EINVAL;
5653 
5654 	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5655 }
5656 EXPORT_SYMBOL(pcix_get_mmrbc);
5657 
5658 /**
5659  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5660  * @dev: PCI device to set
5661  * @mmrbc: maximum memory read count in bytes
5662  *    valid values are 512, 1024, 2048, 4096
5663  *
5664  * If possible, sets the maximum memory read byte count; some bridges have
5665  * errata that prevent this.
5666  */
5667 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5668 {
5669 	int cap;
5670 	u32 stat, v, o;
5671 	u16 cmd;
5672 
5673 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5674 		return -EINVAL;
5675 
5676 	v = ffs(mmrbc) - 10;
5677 
5678 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5679 	if (!cap)
5680 		return -EINVAL;
5681 
5682 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5683 		return -EINVAL;
5684 
5685 	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5686 		return -E2BIG;
5687 
5688 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5689 		return -EINVAL;
5690 
5691 	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5692 	if (o != v) {
5693 		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5694 			return -EIO;
5695 
5696 		cmd &= ~PCI_X_CMD_MAX_READ;
5697 		cmd |= v << 2;
5698 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5699 			return -EIO;
5700 	}
5701 	return 0;
5702 }
5703 EXPORT_SYMBOL(pcix_set_mmrbc);
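/*
 * Worked example of the encoding above: for mmrbc = 2048, ffs(2048) is
 * 12, so v = 12 - 10 = 2; that value is written to PCI_X_CMD bits 3:2
 * and decodes back as 512 << 2 = 2048 bytes.
 */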
5704 
5705 /**
5706  * pcie_get_readrq - get PCI Express read request size
5707  * @dev: PCI device to query
5708  *
5709  * Returns maximum memory read request in bytes or appropriate error value.
5710  */
5711 int pcie_get_readrq(struct pci_dev *dev)
5712 {
5713 	u16 ctl;
5714 
5715 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5716 
5717 	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5718 }
5719 EXPORT_SYMBOL(pcie_get_readrq);
5720 
5721 /**
5722  * pcie_set_readrq - set PCI Express maximum memory read request
5723  * @dev: PCI device to set
5724  * @rq: maximum memory read count in bytes
5725  *    valid values are 128, 256, 512, 1024, 2048, 4096
5726  *
5727  * If possible, sets the maximum memory read request in bytes.
5728  */
5729 int pcie_set_readrq(struct pci_dev *dev, int rq)
5730 {
5731 	u16 v;
5732 	int ret;
5733 	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
5734 
5735 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5736 		return -EINVAL;
5737 
5738 	/*
5739 	 * If using the "performance" PCIe config, we clamp the read rq
5740 	 * size to the max packet size to keep the host bridge from
5741 	 * generating requests larger than we can cope with.
5742 	 */
5743 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5744 		int mps = pcie_get_mps(dev);
5745 
5746 		if (mps < rq)
5747 			rq = mps;
5748 	}
5749 
5750 	v = (ffs(rq) - 8) << 12;
5751 
5752 	if (bridge->no_inc_mrrs) {
5753 		int max_mrrs = pcie_get_readrq(dev);
5754 
5755 		if (rq > max_mrrs) {
5756 			pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
5757 			return -EINVAL;
5758 		}
5759 	}
5760 
5761 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5762 						  PCI_EXP_DEVCTL_READRQ, v);
5763 
5764 	return pcibios_err_to_errno(ret);
5765 }
5766 EXPORT_SYMBOL(pcie_set_readrq);
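/*
 * Usage sketch (illustrative): a driver requesting a 512-byte MRRS from
 * its probe path; the value must be a power of two in [128, 4096]:
 *
 *	rc = pcie_set_readrq(pdev, 512);
 *
 * Internally 512 encodes as (ffs(512) - 8) << 12 = 2 << 12, i.e. field
 * value 2 in PCI_EXP_DEVCTL bits 14:12.
 */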
5767 
5768 /**
5769  * pcie_get_mps - get PCI Express maximum payload size
5770  * @dev: PCI device to query
5771  *
5772  * Returns maximum payload size in bytes
5773  */
5774 int pcie_get_mps(struct pci_dev *dev)
5775 {
5776 	u16 ctl;
5777 
5778 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5779 
5780 	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5781 }
5782 EXPORT_SYMBOL(pcie_get_mps);
5783 
5784 /**
5785  * pcie_set_mps - set PCI Express maximum payload size
5786  * @dev: PCI device to set
5787  * @mps: maximum payload size in bytes
5788  *    valid values are 128, 256, 512, 1024, 2048, 4096
5789  *
5790  * If possible, sets the maximum payload size.
5791  */
5792 int pcie_set_mps(struct pci_dev *dev, int mps)
5793 {
5794 	u16 v;
5795 	int ret;
5796 
5797 	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5798 		return -EINVAL;
5799 
5800 	v = ffs(mps) - 8;
5801 	if (v > dev->pcie_mpss)
5802 		return -EINVAL;
5803 	v <<= 5;
5804 
5805 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5806 						  PCI_EXP_DEVCTL_PAYLOAD, v);
5807 
5808 	return pcibios_err_to_errno(ret);
5809 }
5810 EXPORT_SYMBOL(pcie_set_mps);
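/*
 * Worked example of the encoding above: for mps = 256, ffs(256) - 8 = 1,
 * which is accepted only if it does not exceed dev->pcie_mpss (the MPS
 * capability recorded at enumeration) and is then written, shifted left
 * by 5, into PCI_EXP_DEVCTL bits 7:5.
 */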
5811 
5812 /**
5813  * pcie_bandwidth_available - determine minimum link settings of a PCIe
5814  *			      device and its bandwidth limitation
5815  * @dev: PCI device to query
5816  * @limiting_dev: storage for device causing the bandwidth limitation
5817  * @speed: storage for speed of limiting device
5818  * @width: storage for width of limiting device
5819  *
5820  * Walk up the PCI device chain and find the point where the minimum
5821  * bandwidth is available.  Return the bandwidth available there and (if
5822  * limiting_dev, speed, and width pointers are supplied) information about
5823  * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
5824  * raw bandwidth.
5825  */
5826 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5827 			     enum pci_bus_speed *speed,
5828 			     enum pcie_link_width *width)
5829 {
5830 	u16 lnksta;
5831 	enum pci_bus_speed next_speed;
5832 	enum pcie_link_width next_width;
5833 	u32 bw, next_bw;
5834 
5835 	if (speed)
5836 		*speed = PCI_SPEED_UNKNOWN;
5837 	if (width)
5838 		*width = PCIE_LNK_WIDTH_UNKNOWN;
5839 
5840 	bw = 0;
5841 
5842 	while (dev) {
5843 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5844 
5845 		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5846 		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5847 			PCI_EXP_LNKSTA_NLW_SHIFT;
5848 
5849 		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5850 
5851 		/* Check if current device limits the total bandwidth */
5852 		if (!bw || next_bw <= bw) {
5853 			bw = next_bw;
5854 
5855 			if (limiting_dev)
5856 				*limiting_dev = dev;
5857 			if (speed)
5858 				*speed = next_speed;
5859 			if (width)
5860 				*width = next_width;
5861 		}
5862 
5863 		dev = pci_upstream_bridge(dev);
5864 	}
5865 
5866 	return bw;
5867 }
5868 EXPORT_SYMBOL(pcie_bandwidth_available);
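/*
 * Worked example (using the kernel's PCIE_SPEED2MBS_ENC() table, which
 * already accounts for encoding overhead): one 8.0 GT/s lane carries
 * 7877 Mb/s after 128b/130b encoding, so an x8 link at 8.0 GT/s yields
 * 8 * 7877 = 63016 Mb/s; the slowest/narrowest hop on the path to the
 * root determines the value returned.
 */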
5869 
5870 /**
5871  * pcie_get_speed_cap - query for the PCI device's link speed capability
5872  * @dev: PCI device to query
5873  *
5874  * Query the PCI device speed capability.  Return the maximum link speed
5875  * supported by the device.
5876  */
5877 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5878 {
5879 	u32 lnkcap2, lnkcap;
5880 
5881 	/*
5882 	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
5883 	 * implementation note there recommends using the Supported Link
5884 	 * Speeds Vector in Link Capabilities 2 when supported.
5885 	 *
5886 	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
5887 	 * should use the Supported Link Speeds field in Link Capabilities,
5888 	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
5889 	 */
5890 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5891 
5892 	/* PCIe r3.0-compliant */
5893 	if (lnkcap2)
5894 		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
5895 
5896 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5897 	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5898 		return PCIE_SPEED_5_0GT;
5899 	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5900 		return PCIE_SPEED_2_5GT;
5901 
5902 	return PCI_SPEED_UNKNOWN;
5903 }
5904 EXPORT_SYMBOL(pcie_get_speed_cap);
5905 
5906 /**
5907  * pcie_get_width_cap - query for the PCI device's link width capability
5908  * @dev: PCI device to query
5909  *
5910  * Query the PCI device width capability.  Return the maximum link width
5911  * supported by the device.
5912  */
5913 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5914 {
5915 	u32 lnkcap;
5916 
5917 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5918 	if (lnkcap)
5919 		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5920 
5921 	return PCIE_LNK_WIDTH_UNKNOWN;
5922 }
5923 EXPORT_SYMBOL(pcie_get_width_cap);
5924 
5925 /**
5926  * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
5927  * @dev: PCI device
5928  * @speed: storage for link speed
5929  * @width: storage for link width
5930  *
5931  * Calculate a PCI device's link bandwidth by querying for its link speed
5932  * and width, multiplying them, and applying encoding overhead.  The result
5933  * is in Mb/s, i.e., megabits/second of raw bandwidth.
5934  */
5935 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5936 			   enum pcie_link_width *width)
5937 {
5938 	*speed = pcie_get_speed_cap(dev);
5939 	*width = pcie_get_width_cap(dev);
5940 
5941 	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5942 		return 0;
5943 
5944 	return *width * PCIE_SPEED2MBS_ENC(*speed);
5945 }
5946 
5947 /**
5948  * __pcie_print_link_status - Report the PCI device's link speed and width
5949  * @dev: PCI device to query
5950  * @verbose: Print info even when enough bandwidth is available
5951  *
5952  * If the available bandwidth at the device is less than the device is
5953  * capable of, report the device's maximum possible bandwidth and the
5954  * upstream link that limits its performance.  If @verbose, always print
5955  * the available bandwidth, even if the device isn't constrained.
5956  */
5957 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
5958 {
5959 	enum pcie_link_width width, width_cap;
5960 	enum pci_bus_speed speed, speed_cap;
5961 	struct pci_dev *limiting_dev = NULL;
5962 	u32 bw_avail, bw_cap;
5963 
5964 	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
5965 	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5966 
5967 	if (bw_avail >= bw_cap && verbose)
5968 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5969 			 bw_cap / 1000, bw_cap % 1000,
5970 			 pci_speed_string(speed_cap), width_cap);
5971 	else if (bw_avail < bw_cap)
5972 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5973 			 bw_avail / 1000, bw_avail % 1000,
5974 			 pci_speed_string(speed), width,
5975 			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
5976 			 bw_cap / 1000, bw_cap % 1000,
5977 			 pci_speed_string(speed_cap), width_cap);
5978 }
5979 
5980 /**
5981  * pcie_print_link_status - Report the PCI device's link speed and width
5982  * @dev: PCI device to query
5983  *
5984  * Report the available bandwidth at the device.
5985  */
5986 void pcie_print_link_status(struct pci_dev *dev)
5987 {
5988 	__pcie_print_link_status(dev, true);
5989 }
5990 EXPORT_SYMBOL(pcie_print_link_status);
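/*
 * Usage sketch (illustrative): bandwidth-sensitive drivers commonly call
 * this once from probe so a constrained link shows up in the log:
 *
 *	static int my_probe(struct pci_dev *pdev,
 *			    const struct pci_device_id *id)
 *	{
 *		...
 *		pcie_print_link_status(pdev);
 *		return 0;
 *	}
 */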
5991 
5992 /**
5993  * pci_select_bars - Make BAR mask from the type of resource
5994  * @dev: the PCI device for which BAR mask is made
5995  * @flags: resource type mask to be selected
5996  *
5997  * This helper routine makes a BAR mask from the type of resource.
5998  */
5999 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6000 {
6001 	int i, bars = 0;
6002 	for (i = 0; i < PCI_NUM_RESOURCES; i++)
6003 		if (pci_resource_flags(dev, i) & flags)
6004 			bars |= (1 << i);
6005 	return bars;
6006 }
6007 EXPORT_SYMBOL(pci_select_bars);
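/*
 * Usage sketch (illustrative): the returned mask pairs naturally with
 * pci_request_selected_regions():
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	rc = pci_request_selected_regions(pdev, bars, "my_driver");
 */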
6008 
6009 /* Some architectures require additional programming to enable VGA */
6010 static arch_set_vga_state_t arch_set_vga_state;
6011 
6012 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6013 {
6014 	arch_set_vga_state = func;	/* NULL disables */
6015 }
6016 
6017 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6018 				  unsigned int command_bits, u32 flags)
6019 {
6020 	if (arch_set_vga_state)
6021 		return arch_set_vga_state(dev, decode, command_bits,
6022 						flags);
6023 	return 0;
6024 }
6025 
6026 /**
6027  * pci_set_vga_state - set VGA decode state on device and parents if requested
6028  * @dev: the PCI device
6029  * @decode: true = enable decoding, false = disable decoding
6030  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6031  * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE
6032  * (traverse ancestors and change bridges)
6033  */
6034 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6035 		      unsigned int command_bits, u32 flags)
6036 {
6037 	struct pci_bus *bus;
6038 	struct pci_dev *bridge;
6039 	u16 cmd;
6040 	int rc;
6041 
6042 	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6043 
6044 	/* ARCH specific VGA enables */
6045 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6046 	if (rc)
6047 		return rc;
6048 
6049 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6050 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
6051 		if (decode)
6052 			cmd |= command_bits;
6053 		else
6054 			cmd &= ~command_bits;
6055 		pci_write_config_word(dev, PCI_COMMAND, cmd);
6056 	}
6057 
6058 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6059 		return 0;
6060 
6061 	bus = dev->bus;
6062 	while (bus) {
6063 		bridge = bus->self;
6064 		if (bridge) {
6065 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6066 					     &cmd);
6067 			if (decode)
6068 				cmd |= PCI_BRIDGE_CTL_VGA;
6069 			else
6070 				cmd &= ~PCI_BRIDGE_CTL_VGA;
6071 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6072 					      cmd);
6073 		}
6074 		bus = bus->parent;
6075 	}
6076 	return 0;
6077 }
6078 
6079 #ifdef CONFIG_ACPI
6080 bool pci_pr3_present(struct pci_dev *pdev)
6081 {
6082 	struct acpi_device *adev;
6083 
6084 	if (acpi_disabled)
6085 		return false;
6086 
6087 	adev = ACPI_COMPANION(&pdev->dev);
6088 	if (!adev)
6089 		return false;
6090 
6091 	return adev->power.flags.power_resources &&
6092 		acpi_has_method(adev->handle, "_PR3");
6093 }
6094 EXPORT_SYMBOL_GPL(pci_pr3_present);
6095 #endif
6096 
6097 /**
6098  * pci_add_dma_alias - Add a DMA devfn alias for a device
6099  * @dev: the PCI device for which alias is added
6100  * @devfn_from: alias slot and function
6101  * @nr_devfns: number of subsequent devfns to alias
6102  *
6103  * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6104  * which is used to program permissible bus-devfn source addresses for DMA
6105  * requests in an IOMMU.  These aliases factor into IOMMU group creation
6106  * and are useful for devices generating DMA requests beyond or different
6107  * from their logical bus-devfn.  Examples include device quirks where the
6108  * device simply uses the wrong devfn, as well as non-transparent bridges
6109  * where the alias may be a proxy for devices in another domain.
6110  *
6111  * IOMMU group creation is performed during device discovery or addition,
6112  * prior to any potential DMA mapping and therefore prior to driver probing
6113  * (especially for userspace assigned devices where IOMMU group definition
6114  * cannot be left as a userspace activity).  DMA aliases should therefore
6115  * be configured via quirks, such as the PCI fixup header quirk.
6116  */
6117 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns)
6118 {
6119 	int devfn_to;
6120 
6121 	nr_devfns = min(nr_devfns, (unsigned) MAX_NR_DEVFNS - devfn_from);
6122 	devfn_to = devfn_from + nr_devfns - 1;
6123 
6124 	if (!dev->dma_alias_mask)
6125 		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6126 	if (!dev->dma_alias_mask) {
6127 		pci_warn(dev, "Unable to allocate DMA alias mask\n");
6128 		return;
6129 	}
6130 
6131 	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6132 
6133 	if (nr_devfns == 1)
6134 		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6135 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6136 	else if (nr_devfns > 1)
6137 		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6138 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6139 				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6140 }
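/*
 * Usage sketch (illustrative; the IDs are placeholders): a header fixup
 * quirk, as described above, for a device that issues DMA as function 0
 * of its slot:
 *
 *	static void my_dma_alias_quirk(struct pci_dev *dev)
 *	{
 *		pci_add_dma_alias(dev,
 *				  PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, my_dma_alias_quirk);
 */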
6141 
6142 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6143 {
6144 	return (dev1->dma_alias_mask &&
6145 		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6146 	       (dev2->dma_alias_mask &&
6147 		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6148 	       pci_real_dma_dev(dev1) == dev2 ||
6149 	       pci_real_dma_dev(dev2) == dev1;
6150 }
6151 
6152 bool pci_device_is_present(struct pci_dev *pdev)
6153 {
6154 	u32 v;
6155 
6156 	/* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
6157 	pdev = pci_physfn(pdev);
6158 	if (pci_dev_is_disconnected(pdev))
6159 		return false;
6160 	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6161 }
6162 EXPORT_SYMBOL_GPL(pci_device_is_present);
6163 
6164 void pci_ignore_hotplug(struct pci_dev *dev)
6165 {
6166 	struct pci_dev *bridge = dev->bus->self;
6167 
6168 	dev->ignore_hotplug = 1;
6169 	/* Propagate the "ignore hotplug" setting to the parent bridge. */
6170 	if (bridge)
6171 		bridge->ignore_hotplug = 1;
6172 }
6173 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6174 
6175 /**
6176  * pci_real_dma_dev - Get PCI DMA device for PCI device
6177  * @dev: the PCI device that may have a PCI DMA alias
6178  *
6179  * Permits the platform to provide architecture-specific functionality to
6180  * devices needing to alias DMA to another PCI device on another PCI bus. If
6181  * the PCI device is on the same bus, it is recommended to use
6182  * pci_add_dma_alias(). This is the default implementation. Architecture
6183  * implementations can override this.
6184  */
6185 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6186 {
6187 	return dev;
6188 }
6189 
6190 resource_size_t __weak pcibios_default_alignment(void)
6191 {
6192 	return 0;
6193 }
6194 
6195 /*
6196  * Arches that don't want to expose struct resource to userland as-is in
6197  * sysfs and /proc can implement their own pci_resource_to_user().
6198  */
6199 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6200 				 const struct resource *rsrc,
6201 				 resource_size_t *start, resource_size_t *end)
6202 {
6203 	*start = rsrc->start;
6204 	*end = rsrc->end;
6205 }
6206 
6207 static char *resource_alignment_param;
6208 static DEFINE_SPINLOCK(resource_alignment_lock);
6209 
6210 /**
6211  * pci_specified_resource_alignment - get resource alignment specified by user.
6212  * @dev: the PCI device to get
6213  * @resize: whether or not to change resources' size when reassigning alignment
6214  *
6215  * RETURNS: Resource alignment if it is specified.
6216  *          Zero if it is not specified.
6217  */
6218 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6219 							bool *resize)
6220 {
6221 	int align_order, count;
6222 	resource_size_t align = pcibios_default_alignment();
6223 	const char *p;
6224 	int ret;
6225 
6226 	spin_lock(&resource_alignment_lock);
6227 	p = resource_alignment_param;
6228 	if (!p || !*p)
6229 		goto out;
6230 	if (pci_has_flag(PCI_PROBE_ONLY)) {
6231 		align = 0;
6232 		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6233 		goto out;
6234 	}
6235 
6236 	while (*p) {
6237 		count = 0;
6238 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6239 		    p[count] == '@') {
6240 			p += count + 1;
6241 			if (align_order > 63) {
6242 				pr_err("PCI: Invalid requested alignment (order %d)\n",
6243 				       align_order);
6244 				align_order = PAGE_SHIFT;
6245 			}
6246 		} else {
6247 			align_order = PAGE_SHIFT;
6248 		}
6249 
6250 		ret = pci_dev_str_match(dev, p, &p);
6251 		if (ret == 1) {
6252 			*resize = true;
6253 			align = 1ULL << align_order;
6254 			break;
6255 		} else if (ret < 0) {
6256 			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6257 			       p);
6258 			break;
6259 		}
6260 
6261 		if (*p != ';' && *p != ',') {
6262 			/* End of param or invalid format */
6263 			break;
6264 		}
6265 		p++;
6266 	}
6267 out:
6268 	spin_unlock(&resource_alignment_lock);
6269 	return align;
6270 }
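/*
 * The parameter parsed above is a ';'- or ','-separated list of
 * [<order>@]<device> entries, where <order> is a power-of-two exponent
 * (default PAGE_SHIFT) and <device> is any form pci_dev_str_match()
 * accepts.  Illustrative examples (values are placeholders):
 *
 *	pci=resource_alignment=20@0000:01:00.0	(1 MiB, i.e. 1 << 20)
 *	pci=resource_alignment=pci:8086:1533	(page-aligned)
 */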
6271 
6272 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6273 					   resource_size_t align, bool resize)
6274 {
6275 	struct resource *r = &dev->resource[bar];
6276 	resource_size_t size;
6277 
6278 	if (!(r->flags & IORESOURCE_MEM))
6279 		return;
6280 
6281 	if (r->flags & IORESOURCE_PCI_FIXED) {
6282 		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
6283 			 bar, r, (unsigned long long)align);
6284 		return;
6285 	}
6286 
6287 	size = resource_size(r);
6288 	if (size >= align)
6289 		return;
6290 
6291 	/*
6292 	 * Increase the alignment of the resource.  There are two ways we
6293 	 * can do this:
6294 	 *
6295 	 * 1) Increase the size of the resource.  BARs are aligned on their
6296 	 *    size, so when we reallocate space for this resource, we'll
6297 	 *    allocate it with the larger alignment.  This also prevents
6298 	 *    assignment of any other BARs inside the alignment region, so
6299 	 *    if we're requesting page alignment, this means no other BARs
6300 	 *    will share the page.
6301 	 *
6302 	 *    The disadvantage is that this makes the resource larger than
6303 	 *    the hardware BAR, which may break drivers that compute things
6304 	 *    based on the resource size, e.g., to find registers at a
6305 	 *    fixed offset before the end of the BAR.
6306 	 *
6307 	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6308 	 *    set r->start to the desired alignment.  By itself this
6309 	 *    doesn't prevent other BARs being put inside the alignment
6310 	 *    region, but if we realign *every* resource of every device in
6311 	 *    the system, none of them will share an alignment region.
6312 	 *
6313 	 * When the user has requested alignment for only some devices via
6314 	 * the "pci=resource_alignment" argument, "resize" is true and we
6315 	 * use the first method.  Otherwise we assume we're aligning all
6316 	 * devices and we use the second.
6317 	 */
6318 
6319 	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
6320 		 bar, r, (unsigned long long)align);
6321 
6322 	if (resize) {
6323 		r->start = 0;
6324 		r->end = align - 1;
6325 	} else {
6326 		r->flags &= ~IORESOURCE_SIZEALIGN;
6327 		r->flags |= IORESOURCE_STARTALIGN;
6328 		r->start = align;
6329 		r->end = r->start + size - 1;
6330 	}
6331 	r->flags |= IORESOURCE_UNSET;
6332 }
6333 
6334 /*
6335  * This function disables memory decoding and releases memory resources
6336  * of the device specified by the kernel boot parameter 'pci=resource_alignment='.
6337  * It also rounds up size to specified alignment.
6338  * Later on, the kernel will assign page-aligned memory resource back
6339  * to the device.
6340  */
6341 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6342 {
6343 	int i;
6344 	struct resource *r;
6345 	resource_size_t align;
6346 	u16 command;
6347 	bool resize = false;
6348 
6349 	/*
6350 	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6351 	 * 3.4.1.11.  Their resources are allocated from the space
6352 	 * described by the VF BARx register in the PF's SR-IOV capability.
6353 	 * We can't influence their alignment here.
6354 	 */
6355 	if (dev->is_virtfn)
6356 		return;
6357 
6358 	/* check if specified PCI is target device to reassign */
6359 	align = pci_specified_resource_alignment(dev, &resize);
6360 	if (!align)
6361 		return;
6362 
6363 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6364 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6365 		pci_warn(dev, "Can't reassign resources to host bridge\n");
6366 		return;
6367 	}
6368 
6369 	pci_read_config_word(dev, PCI_COMMAND, &command);
6370 	command &= ~PCI_COMMAND_MEMORY;
6371 	pci_write_config_word(dev, PCI_COMMAND, command);
6372 
6373 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6374 		pci_request_resource_alignment(dev, i, align, resize);
6375 
6376 	/*
6377 	 * Need to disable bridge's resource window,
6378 	 * to enable the kernel to reassign new resource
6379 	 * window later on.
6380 	 */
6381 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6382 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6383 			r = &dev->resource[i];
6384 			if (!(r->flags & IORESOURCE_MEM))
6385 				continue;
6386 			r->flags |= IORESOURCE_UNSET;
6387 			r->end = resource_size(r) - 1;
6388 			r->start = 0;
6389 		}
6390 		pci_disable_bridge_window(dev);
6391 	}
6392 }
6393 
6394 static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
6395 {
6396 	size_t count = 0;
6397 
6398 	spin_lock(&resource_alignment_lock);
6399 	if (resource_alignment_param)
6400 		count = scnprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
6401 	spin_unlock(&resource_alignment_lock);
6402 
6403 	/*
6404 	 * When set by the command line, resource_alignment_param will not
6405 	 * have a trailing line feed, which is ugly. So conditionally add
6406 	 * it here.
6407 	 */
6408 	if (count >= 2 && buf[count - 2] != '\n' && count < PAGE_SIZE - 1) {
6409 		buf[count - 1] = '\n';
6410 		buf[count++] = 0;
6411 	}
6412 
6413 	return count;
6414 }
6415 
6416 static ssize_t resource_alignment_store(struct bus_type *bus,
6417 					const char *buf, size_t count)
6418 {
6419 	char *param = kstrndup(buf, count, GFP_KERNEL);
6420 
6421 	if (!param)
6422 		return -ENOMEM;
6423 
6424 	spin_lock(&resource_alignment_lock);
6425 	kfree(resource_alignment_param);
6426 	resource_alignment_param = param;
6427 	spin_unlock(&resource_alignment_lock);
6428 	return count;
6429 }
6430 
6431 static BUS_ATTR_RW(resource_alignment);
6432 
6433 static int __init pci_resource_alignment_sysfs_init(void)
6434 {
6435 	return bus_create_file(&pci_bus_type,
6436 					&bus_attr_resource_alignment);
6437 }
6438 late_initcall(pci_resource_alignment_sysfs_init);
6439 
6440 static void pci_no_domains(void)
6441 {
6442 #ifdef CONFIG_PCI_DOMAINS
6443 	pci_domains_supported = 0;
6444 #endif
6445 }
6446 
6447 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6448 static atomic_t __domain_nr = ATOMIC_INIT(-1);
6449 
6450 static int pci_get_new_domain_nr(void)
6451 {
6452 	return atomic_inc_return(&__domain_nr);
6453 }
6454 
6455 static int of_pci_bus_find_domain_nr(struct device *parent)
6456 {
6457 	static int use_dt_domains = -1;
6458 	int domain = -1;
6459 
6460 	if (parent)
6461 		domain = of_get_pci_domain_nr(parent->of_node);
6462 
6463 	/*
6464 	 * Check DT domain and use_dt_domains values.
6465 	 *
6466 	 * If DT domain property is valid (domain >= 0) and
6467 	 * use_dt_domains != 0, the DT assignment is valid since this means
6468 	 * we have not previously allocated a domain number by using
6469 	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
6470 	 * 1, to indicate that we have just assigned a domain number from
6471 	 * DT.
6472 	 *
6473  * If the DT domain property value is not valid (i.e., domain < 0), and we
6474 	 * have not previously assigned a domain number from DT
6475 	 * (use_dt_domains != 1) we should assign a domain number by
6476 	 * using the:
6477 	 *
6478 	 * pci_get_new_domain_nr()
6479 	 *
6480  * API and update the use_dt_domains value to keep track of the method
6481  * we are using to assign domain numbers (use_dt_domains = 0).
6482 	 *
6483 	 * All other combinations imply we have a platform that is trying
6484 	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
6485 	 * which is a recipe for domain mishandling and it is prevented by
6486 	 * invalidating the domain value (domain = -1) and printing a
6487 	 * corresponding error.
6488 	 */
6489 	if (domain >= 0 && use_dt_domains) {
6490 		use_dt_domains = 1;
6491 	} else if (domain < 0 && use_dt_domains != 1) {
6492 		use_dt_domains = 0;
6493 		domain = pci_get_new_domain_nr();
6494 	} else {
6495 		if (parent)
6496 			pr_err("Node %pOF has ", parent->of_node);
6497 		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
6498 		domain = -1;
6499 	}
6500 
6501 	return domain;
6502 }
6503 
6504 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6505 {
6506 	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6507 			       acpi_pci_bus_find_domain_nr(bus);
6508 }
6509 #endif
6510 
6511 /**
6512  * pci_ext_cfg_avail - can we access extended PCI config space?
6513  *
6514  * Returns 1 if we can access PCI extended config space (offsets
6515  * greater than 0xff). This is the default implementation. Architecture
6516  * implementations can override this.
6517  */
6518 int __weak pci_ext_cfg_avail(void)
6519 {
6520 	return 1;
6521 }
6522 
6523 void __weak pci_fixup_cardbus(struct pci_bus *bus)
6524 {
6525 }
6526 EXPORT_SYMBOL(pci_fixup_cardbus);
6527 
6528 static int __init pci_setup(char *str)
6529 {
6530 	while (str) {
6531 		char *k = strchr(str, ',');
6532 		if (k)
6533 			*k++ = 0;
6534 		if (*str && (str = pcibios_setup(str)) && *str) {
6535 			if (!strcmp(str, "nomsi")) {
6536 				pci_no_msi();
6537 			} else if (!strncmp(str, "noats", 5)) {
6538 				pr_info("PCIe: ATS is disabled\n");
6539 				pcie_ats_disabled = true;
6540 			} else if (!strcmp(str, "noaer")) {
6541 				pci_no_aer();
6542 			} else if (!strcmp(str, "earlydump")) {
6543 				pci_early_dump = true;
6544 			} else if (!strncmp(str, "realloc=", 8)) {
6545 				pci_realloc_get_opt(str + 8);
6546 			} else if (!strncmp(str, "realloc", 7)) {
6547 				pci_realloc_get_opt("on");
6548 			} else if (!strcmp(str, "nodomains")) {
6549 				pci_no_domains();
6550 			} else if (!strncmp(str, "noari", 5)) {
6551 				pcie_ari_disabled = true;
6552 			} else if (!strncmp(str, "cbiosize=", 9)) {
6553 				pci_cardbus_io_size = memparse(str + 9, &str);
6554 			} else if (!strncmp(str, "cbmemsize=", 10)) {
6555 				pci_cardbus_mem_size = memparse(str + 10, &str);
6556 			} else if (!strncmp(str, "resource_alignment=", 19)) {
6557 				resource_alignment_param = str + 19;
6558 			} else if (!strncmp(str, "ecrc=", 5)) {
6559 				pcie_ecrc_get_policy(str + 5);
6560 			} else if (!strncmp(str, "hpiosize=", 9)) {
6561 				pci_hotplug_io_size = memparse(str + 9, &str);
6562 			} else if (!strncmp(str, "hpmmiosize=", 11)) {
6563 				pci_hotplug_mmio_size = memparse(str + 11, &str);
6564 			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6565 				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6566 			} else if (!strncmp(str, "hpmemsize=", 10)) {
6567 				pci_hotplug_mmio_size = memparse(str + 10, &str);
6568 				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6569 			} else if (!strncmp(str, "hpbussize=", 10)) {
6570 				pci_hotplug_bus_size =
6571 					simple_strtoul(str + 10, &str, 0);
6572 				if (pci_hotplug_bus_size > 0xff)
6573 					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6574 			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6575 				pcie_bus_config = PCIE_BUS_TUNE_OFF;
6576 			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
6577 				pcie_bus_config = PCIE_BUS_SAFE;
6578 			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
6579 				pcie_bus_config = PCIE_BUS_PERFORMANCE;
6580 			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6581 				pcie_bus_config = PCIE_BUS_PEER2PEER;
6582 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
6583 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6584 			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
6585 				disable_acs_redir_param = str + 18;
6586 			} else {
6587 				pr_err("PCI: Unknown option `%s'\n", str);
6588 			}
6589 		}
6590 		str = k;
6591 	}
6592 	return 0;
6593 }
6594 early_param("pci", pci_setup);
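/*
 * The options above are combined, comma-separated, in a single "pci="
 * argument.  An illustrative command line exercising several of them:
 *
 *	pci=nomsi,realloc=on,hpmemsize=128M,pcie_bus_safe
 */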
6595 
6596 /*
6597  * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6598  * in pci_setup(), above, to point to data in the __initdata section which
6599  * will be freed after the init sequence is complete. We can't allocate memory
6600  * in pci_setup() because some architectures do not have any memory allocation
6601  * service available during an early_param() call. So we allocate memory and
6602  * copy the variable here before the init section is freed.
6603  *
6604  */
6605 static int __init pci_realloc_setup_params(void)
6606 {
6607 	resource_alignment_param = kstrdup(resource_alignment_param,
6608 					   GFP_KERNEL);
6609 	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6610 
6611 	return 0;
6612 }
6613 pure_initcall(pci_realloc_setup_params);
6614