// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include <linux/bitfield.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

#ifdef CONFIG_X86_32
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
#endif

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

/*
 * Following exit from Conventional Reset, devices must be ready within 1 sec
 * (PCIe r6.0 sec 6.6.1).  A D3cold to D0 transition implies a Conventional
 * Reset (PCIe r6.0 sec 5.8).
 */
#define PCI_RESET_WAIT 1000 /* msec */

/*
 * Devices may extend the 1 sec period through Request Retry Status
 * completions (PCIe r6.0 sec 2.3.1).  The spec does not provide an upper
 * limit, but 60 sec ought to be enough for any device to become
 * responsive.
 */
#define PCIE_RESET_READY_POLL_MS 60000 /* msec */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
	unsigned int upper;

	if (delay_ms) {
		/* Use a 20% upper bound, 1ms minimum */
		upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
		usleep_range(delay_ms * USEC_PER_MSEC,
			     (delay_ms + upper) * USEC_PER_MSEC);
	}
}

bool pci_reset_supported(struct pci_dev *dev)
{
	return dev->reset_methods[0] != 0;
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;


/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __ro_after_init = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size __ro_after_init;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
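
/*
 * Example (illustrative sketch, not used in this file): a driver might
 * clear stale error bits before retrying an operation; "pdev" here is a
 * hypothetical struct pci_dev pointer:
 *
 *	int status = pci_status_get_and_clear_errors(pdev);
 *
 *	if (status < 0)
 *		return status;
 *	if (status & PCI_STATUS_DETECTED_PARITY)
 *		pci_warn(pdev, "stale parity error cleared\n");
 */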

#ifdef CONFIG_HAS_IOMEM
static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
					    bool write_combine)
{
	struct resource *res = &pdev->resource[bar];
	resource_size_t start = res->start;
	resource_size_t size = resource_size(res);

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}

	if (write_combine)
		return ioremap_wc(start, size);

	return ioremap(start, size);
}

void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
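
/*
 * Example (illustrative sketch): mapping BAR 0 during probe; "pdev" and
 * the register offset are hypothetical.  pci_ioremap_bar() returns NULL
 * if the BAR is unset or not a memory resource:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 */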
#endif

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	unsigned int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address, which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or if kernel parameters change.  If the domain is left unspecified, it
 * is taken to be 0.  To be robust against bus renumbering, a path of PCI
 * device/function numbers may be used to address the specific device.
 * The path for a device can be determined using 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}
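
/*
 * Example (illustrative, hypothetical addresses and IDs): strings that
 * pci_dev_str_match() accepts include
 *
 *	0000:00:1f.6			domain:bus:device.function
 *	00:1c.0/00.0			a path below bus 00, device 1c.0
 *	pci:8086:1533			vendor/device IDs
 *	pci:8086:1533:8086:0000		with subsystem IDs (0 matches any)
 */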

static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				  u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			      u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
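
/*
 * Example (illustrative sketch): probing for the PCI Express capability;
 * in practice pci_is_pcie()/pci_pcie_cap() use a value cached at
 * enumeration time instead of re-walking the list:
 *
 *	u8 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
 *
 *	if (!pos)
 *		return -ENODEV;
 */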

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type, pos;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & PCI_HEADER_TYPE_MASK);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
	u32 header;
	int ttl;
	u16 pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
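
/*
 * Example (illustrative sketch): capabilities that may appear several
 * times, such as the vendor-specific one, can be walked like this:
 *
 *	u16 pos = 0;
 *
 *	while ((pos = pci_find_next_ext_capability(pdev, pos,
 *						   PCI_EXT_CAP_ID_VNDR)))
 *		pci_info(pdev, "VSEC at %#x\n", pos);
 */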

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);
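
/*
 * Example (illustrative sketch): using the DSN as a stable device
 * identifier; "buf" and "len" are hypothetical caller-provided values:
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *
 *	if (dsn)
 *		snprintf(buf, len, "%016llx", dsn);
 */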

static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_vsec_capability - Find a vendor-specific extended capability
 * @dev: PCI device to query
 * @vendor: Vendor ID for which capability is defined
 * @cap: Vendor-specific capability ID
 *
 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 * VSEC ID @cap. If found, return the capability offset in
 * config space; otherwise return 0.
 */
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
{
	u16 vsec = 0;
	u32 header;
	int ret;

	if (vendor != dev->vendor)
		return 0;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						     PCI_EXT_CAP_ID_VNDR))) {
		ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
		if (ret != PCIBIOS_SUCCESSFUL)
			continue;

		if (PCI_VNDR_HEADER_ID(header) == cap)
			return vsec;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_vsec_capability);

/**
 * pci_find_dvsec_capability - Find DVSEC for vendor
 * @dev: PCI device to query
 * @vendor: Vendor ID to match for the DVSEC
 * @dvsec: Designated Vendor-specific capability ID
 *
 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
 * offset in config space; otherwise return 0.
 */
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
{
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
	if (!pos)
		return 0;

	while (pos) {
		u16 v, id;

		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
		if (vendor == v && dvsec == id)
			return pos;

		pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
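
/*
 * Example (illustrative sketch; the vendor ID and DVSEC ID values are
 * hypothetical, not taken from any spec):
 *
 *	u16 pos = pci_find_dvsec_capability(pdev, 0x8086, 0x5);
 *
 *	if (!pos)
 *		return -ENODEV;
 */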

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;

	pci_bus_for_each_resource(bus, r) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_resource_name - Return the name of the PCI resource
 * @dev: PCI device to query
 * @i: index of the resource
 *
 * Return the standard PCI resource (BAR) name according to its index.
 */
const char *pci_resource_name(struct pci_dev *dev, unsigned int i)
{
	static const char * const bar_name[] = {
		"BAR 0",
		"BAR 1",
		"BAR 2",
		"BAR 3",
		"BAR 4",
		"BAR 5",
		"ROM",
#ifdef CONFIG_PCI_IOV
		"VF BAR 0",
		"VF BAR 1",
		"VF BAR 2",
		"VF BAR 3",
		"VF BAR 4",
		"VF BAR 5",
#endif
		"bridge window",	/* "io" included in %pR */
		"bridge window",	/* "mem" included in %pR */
		"bridge window",	/* "mem pref" included in %pR */
	};
	static const char * const cardbus_name[] = {
		"BAR 1",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
#ifdef CONFIG_PCI_IOV
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
#endif
		"CardBus bridge window 0",	/* I/O */
		"CardBus bridge window 1",	/* I/O */
		"CardBus bridge window 0",	/* mem */
		"CardBus bridge window 1",	/* mem */
	};

	if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS &&
	    i < ARRAY_SIZE(cardbus_name))
		return cardbus_name[i];

	if (i < ARRAY_SIZE(bar_name))
		return bar_name[i];

	return "unknown";
}

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for the Transaction Pending bit(s) to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}
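
/*
 * Example (a sketch modeled on how the kernel waits for a PCIe
 * function's pending transactions before an FLR):
 *
 *	if (!pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
 *				  PCI_EXP_DEVSTA_TRPND))
 *		pci_err(dev, "transactions still pending\n");
 */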

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;
static const char *config_acs_param;

struct pci_acs {
	u16 cap;
	u16 ctrl;
	u16 fw_ctrl;
};

static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
			     const char *p, const u16 acs_mask, const u16 acs_flags)
{
	u16 flags = acs_flags;
	u16 mask = acs_mask;
	char *delimit;
	int ret = 0;

	if (!p)
		return;

	while (*p) {
		if (!acs_mask) {
			/* Check for ACS flags */
			delimit = strstr(p, "@");
			if (delimit) {
				int end;
				u32 shift = 0;

				end = delimit - p - 1;
				mask = 0;
				flags = 0;

				while (end > -1) {
					if (*(p + end) == '0') {
						mask |= 1 << shift;
						shift++;
						end--;
					} else if (*(p + end) == '1') {
						mask |= 1 << shift;
						flags |= 1 << shift;
						shift++;
						end--;
					} else if ((*(p + end) == 'x') || (*(p + end) == 'X')) {
						shift++;
						end--;
					} else {
						pci_err(dev, "Invalid ACS flags... Ignoring\n");
						return;
					}
				}
				p = delimit + 1;
			} else {
				pci_err(dev, "ACS Flags missing\n");
				return;
			}
		}

		if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR |
			    PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) {
			pci_err(dev, "Invalid ACS flags specified\n");
			return;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse ACS command line parameter\n");
			break;
		} else if (ret == 1) {
			/* Found a match */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pci_dbg(dev, "ACS mask  = %#06x\n", mask);
	pci_dbg(dev, "ACS flags = %#06x\n", flags);
	pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl);
	pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl);

	/*
	 * For mask bits that are 0, copy them from the firmware setting
	 * and apply flags for all the mask bits that are 1.
	 */
	caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask);

	pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl);
}
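
/*
 * Example (illustrative, hypothetical device address): with the
 * "config_acs=" kernel parameter, each character before '@' sets ('1'),
 * clears ('0'), or preserves ('x') one ACS control bit, the last
 * character mapping to bit 0 (Source Validation):
 *
 *	pci=config_acs=1x1@0000:00:1c.0
 */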

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 * @caps: default ACS controls
 */
static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
{
	/* Source Validation */
	caps->ctrl |= (caps->cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	caps->ctrl |= (caps->cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	caps->ctrl |= (caps->cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	caps->ctrl |= (caps->cap & PCI_ACS_UF);

	/* Enable Translation Blocking for external devices and noats */
	if (pci_ats_disabled() || dev->external_facing || dev->requires_dma_protection)
		caps->ctrl |= (caps->cap & PCI_ACS_TB);
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
	struct pci_acs caps;
	bool enable_acs = false;
	int pos;

	/* If an iommu is present we start with kernel default caps */
	if (pci_acs_enable) {
		if (pci_dev_specific_enable_acs(dev))
			enable_acs = true;
	}

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
	caps.fw_ctrl = caps.ctrl;

	if (enable_acs)
		pci_std_enable_acs(dev, &caps);

	/*
	 * Always apply caps from the command line, even if there is no iommu.
	 * Trust that the admin has a reason to change the ACS settings.
	 */
	__pci_config_acs(dev, &caps, disable_acs_redir_param,
			 PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC,
			 ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC));
	__pci_config_acs(dev, &caps, config_acs_param, 0, 0);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl);
}

/**
 * pcie_read_tlp_log - read TLP Header Log
 * @dev: PCIe device
 * @where: PCI Config offset of TLP Header Log
 * @tlp_log: TLP Log structure to fill
 *
 * Fill @tlp_log from TLP Header Log registers, e.g., AER or DPC.
 *
 * Return: 0 on success and filled TLP Log structure, <0 on error.
 */
int pcie_read_tlp_log(struct pci_dev *dev, int where,
		      struct pcie_tlp_log *tlp_log)
{
	int i, ret;

	memset(tlp_log, 0, sizeof(*tlp_log));

	for (i = 0; i < 4; i++) {
		ret = pci_read_config_dword(dev, where + i * 4,
					    &tlp_log->dw[i]);
		if (ret)
			return pcibios_err_to_errno(ret);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pcie_read_tlp_log);
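
/*
 * Example (illustrative sketch): dumping the AER Header Log after an
 * uncorrectable error; "aer" is a hypothetical AER capability offset
 * found with pci_find_ext_capability():
 *
 *	struct pcie_tlp_log log;
 *
 *	if (!pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &log))
 *		pci_err(dev, "TLP: %#010x %#010x %#010x %#010x\n",
 *			log.dw[0], log.dw[1], log.dw[2], log.dw[3]);
 */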

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return true;

	return acpi_pci_power_manageable(dev);
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	if (pci_use_mid_pm())
		return mid_pci_set_power_state(dev, t);

	return acpi_pci_set_power_state(dev, t);
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return mid_pci_get_power_state(dev);

	return acpi_pci_get_power_state(dev);
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (!pci_use_mid_pm())
		acpi_pci_refresh_power_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_choose_state(dev);
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_wakeup(dev, enable);
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_need_resume(dev);
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_bridge_d3(dev);
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		if (PCI_POSSIBLE_ERROR(pmcsr)) {
			dev->current_state = PCI_D3cold;
			return;
		}
		dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	platform_pci_refresh_power_state(dev);
	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	error = platform_pci_set_power_state(dev, state);
	if (!error)
		pci_update_current_state(dev, state);
	else if (!dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_resume_bus - Walk given bus and runtime resume devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_resume_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_resume_one, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	bool retrain = false;
	struct pci_dev *root, *bridge;

	root = pcie_find_root_port(dev);

	if (pci_is_pcie(dev)) {
		bridge = pci_upstream_bridge(dev);
		if (bridge)
			retrain = true;
	}

	/*
	 * The caller has already waited long enough after a reset that the
	 * device should respond to config requests, but it may respond
	 * with Request Retry Status (RRS) if it needs more time to
	 * initialize.
	 *
	 * If the device is below a Root Port with Configuration RRS
	 * Software Visibility enabled, reading the Vendor ID returns a
	 * special data value if the device responded with RRS.  Read the
	 * Vendor ID until we get non-RRS status.
	 *
	 * If there's no Root Port or Configuration RRS Software Visibility
	 * is not enabled, the device may still respond with RRS, but
	 * hardware may retry the config request.  If no retries receive
	 * Successful Completion, hardware generally synthesizes ~0
	 * (PCI_ERROR_RESPONSE) data to complete the read.  Reading Vendor
	 * ID for VFs and non-existent devices also returns ~0, so read the
	 * Command register until it returns something other than ~0.
	 */
	for (;;) {
		u32 id;

		if (pci_dev_is_disconnected(dev)) {
			pci_dbg(dev, "disconnected; not waiting\n");
			return -ENOTTY;
		}

		if (root && root->config_rrs_sv) {
			pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
			if (!pci_bus_rrs_vendor_id(id))
				break;
		} else {
			pci_read_config_dword(dev, PCI_COMMAND, &id);
			if (!PCI_POSSIBLE_ERROR(id))
				break;
		}

		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > PCI_RESET_WAIT) {
			if (retrain) {
				retrain = false;
				if (pcie_failed_link_retrain(bridge) == 0) {
					delay = 1;
					continue;
				}
			}
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);
		}

		msleep(delay);
		delay *= 2;
	}

	if (delay > PCI_RESET_WAIT)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);
	else
		pci_dbg(dev, "ready %dms after %s\n", delay - 1,
			reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 *
 * On success, return 0 or 1, depending on whether or not it is necessary to
 * restore the device's BARs subsequently (1 is returned in that case).
 *
 * On failure, return a negative error code.  Always return failure if @dev
 * lacks a Power Management Capability, even if the platform was able to
 * put the device in D0 via non-PCI means.
 */
int pci_power_up(struct pci_dev *dev)
{
	bool need_restore;
	pci_power_t state;
	u16 pmcsr;

	platform_pci_set_power_state(dev, PCI_D0);

	if (!dev->pm_cap) {
		state = platform_pci_get_power_state(dev);
		if (state == PCI_UNKNOWN)
			dev->current_state = PCI_D0;
		else
			dev->current_state = state;

		return -EIO;
	}

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
			pci_power_name(dev->current_state));
		dev->current_state = PCI_D3cold;
		return -EIO;
	}

	state = pmcsr & PCI_PM_CTRL_STATE_MASK;

	need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
			!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);

	if (state == PCI_D0)
		goto end;

	/*
	 * Force the entire word to 0. This doesn't affect PME_Status, disables
	 * PME_En, and sets PowerState to 0.
	 */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);

	/* Mandatory transition delays; see PCI PM 1.2. */
	if (state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

end:
	dev->current_state = PCI_D0;
	if (need_restore)
		return 1;

	return 0;
}

/**
 * pci_set_full_power_state - Put a PCI device into D0 and update its state
 * @dev: PCI device to power up
 * @locked: whether pci_bus_sem is held
 *
 * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
 * to confirm the state change, restore its BARs if they might be lost and
 * reconfigure ASPM in accordance with the new power state.
 *
 * If pci_restore_state() is going to be called right after a power state change
 * to D0, it is more efficient to use pci_power_up() directly instead of this
 * function.
 */
static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
{
	u16 pmcsr;
	int ret;

	ret = pci_power_up(dev);
	if (ret < 0) {
		if (dev->current_state == PCI_D0)
			return 0;

		return ret;
	}

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	if (dev->current_state != PCI_D0) {
		pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
				     pci_power_name(dev->current_state));
	} else if (ret > 0) {
		/*
		 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
		 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
		 * from D3hot to D0 _may_ perform an internal reset, thereby
		 * going to "D0 Uninitialized" rather than "D0 Initialized".
		 * For example, at least some versions of the 3c905B and the
		 * 3c556B exhibit this behaviour.
		 *
		 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
		 * devices in a D3hot state at boot.  Consequently, we need to
		 * restore at least the BARs so that the device will be
		 * accessible to its driver.
		 */
		pci_restore_bars(dev);
	}

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self, locked);

	return 0;
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
{
	if (!bus)
		return;

	if (locked)
		pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
	else
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_low_power_state - Put a PCI device into a low-power state.
 * @dev: PCI device to handle.
 * @state: PCI power state (D1, D2, D3hot) to put the device into.
 * @locked: whether pci_bus_sem is held
 *
 * Use the device's PCI_PM_CTRL register to put it into a low-power state.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return -EIO;

	/*
	 * Validate transition: We can enter D0 from any state, but if
	 * we're already in a low-power state, we can only go deeper.  E.g.,
	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
	 * we'd have to go from D3 to D0, then to D1.
	 */
	if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
		pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		dev->current_state = PCI_D3cold;
		return -EIO;
	}

	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pmcsr |= state;

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays; see PCI PM 1.2. */
	if (state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self, locked);

	return 0;
}

static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_set_full_power_state(dev, locked);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	if (state == PCI_D3cold) {
		/*
		 * To put the device in D3cold, put it into D3hot in the native
		 * way, then put it into D3cold using platform ops.
		 */
		error = pci_set_low_power_state(dev, PCI_D3hot, locked);

		if (pci_platform_power_transition(dev, PCI_D3cold))
			return error;

		/* Powering off a bridge may power off the whole hierarchy */
		if (dev->current_state == PCI_D3cold)
			__pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
	} else {
		error = pci_set_low_power_state(dev, state, locked);

		if (pci_platform_power_transition(dev, state))
			return error;
	}

	return 0;
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	return __pci_set_power_state(dev, state, false);
}
EXPORT_SYMBOL(pci_set_power_state);
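
/*
 * Example (illustrative sketch): the usual suspend/resume pairing in a
 * driver, saving config space before dropping to D3hot and restoring it
 * after returning to D0:
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 *
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 */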
1683 
pci_set_power_state_locked(struct pci_dev * dev,pci_power_t state)1684 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
1685 {
1686 	lockdep_assert_held(&pci_bus_sem);
1687 
1688 	return __pci_set_power_state(dev, state, true);
1689 }
1690 EXPORT_SYMBOL(pci_set_power_state_locked);
1691 
1692 #define PCI_EXP_SAVE_REGS	7
1693 
_pci_find_saved_cap(struct pci_dev * pci_dev,u16 cap,bool extended)1694 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1695 						       u16 cap, bool extended)
1696 {
1697 	struct pci_cap_saved_state *tmp;
1698 
1699 	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1700 		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1701 			return tmp;
1702 	}
1703 	return NULL;
1704 }
1705 
pci_find_saved_cap(struct pci_dev * dev,char cap)1706 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1707 {
1708 	return _pci_find_saved_cap(dev, cap, false);
1709 }
1710 
pci_find_saved_ext_cap(struct pci_dev * dev,u16 cap)1711 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1712 {
1713 	return _pci_find_saved_cap(dev, cap, true);
1714 }
1715 
pci_save_pcie_state(struct pci_dev * dev)1716 static int pci_save_pcie_state(struct pci_dev *dev)
1717 {
1718 	int i = 0;
1719 	struct pci_cap_saved_state *save_state;
1720 	u16 *cap;
1721 
1722 	if (!pci_is_pcie(dev))
1723 		return 0;
1724 
1725 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1726 	if (!save_state) {
1727 		pci_err(dev, "buffer not found in %s\n", __func__);
1728 		return -ENOMEM;
1729 	}
1730 
1731 	cap = (u16 *)&save_state->cap.data[0];
1732 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1733 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1734 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1735 	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
1736 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1737 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1738 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1739 
1740 	pci_save_aspm_l1ss_state(dev);
1741 	pci_save_ltr_state(dev);
1742 
1743 	return 0;
1744 }
1745 
1746 static void pci_restore_pcie_state(struct pci_dev *dev)
1747 {
1748 	int i = 0;
1749 	struct pci_cap_saved_state *save_state;
1750 	u16 *cap;
1751 
1752 	/*
1753 	 * Restore max latencies (in the LTR capability) before enabling
1754 	 * LTR itself in PCI_EXP_DEVCTL2.
1755 	 */
1756 	pci_restore_ltr_state(dev);
1757 	pci_restore_aspm_l1ss_state(dev);
1758 
1759 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1760 	if (!save_state)
1761 		return;
1762 
1763 	/*
1764 	 * Downstream ports reset the LTR enable bit when link goes down.
1765 	 * Check and re-configure the bit here before restoring device.
1766 	 * PCIe r5.0, sec 7.5.3.16.
1767 	 */
1768 	pci_bridge_reconfigure_ltr(dev);
1769 
1770 	cap = (u16 *)&save_state->cap.data[0];
1771 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1772 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1773 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1774 	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1775 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1776 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1777 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1778 }
1779 
1780 static int pci_save_pcix_state(struct pci_dev *dev)
1781 {
1782 	int pos;
1783 	struct pci_cap_saved_state *save_state;
1784 
1785 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1786 	if (!pos)
1787 		return 0;
1788 
1789 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1790 	if (!save_state) {
1791 		pci_err(dev, "buffer not found in %s\n", __func__);
1792 		return -ENOMEM;
1793 	}
1794 
1795 	pci_read_config_word(dev, pos + PCI_X_CMD,
1796 			     (u16 *)save_state->cap.data);
1797 
1798 	return 0;
1799 }
1800 
1801 static void pci_restore_pcix_state(struct pci_dev *dev)
1802 {
1803 	int i = 0, pos;
1804 	struct pci_cap_saved_state *save_state;
1805 	u16 *cap;
1806 
1807 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1808 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1809 	if (!save_state || !pos)
1810 		return;
1811 	cap = (u16 *)&save_state->cap.data[0];
1812 
1813 	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1814 }
1815 
1816 /**
1817  * pci_save_state - save the PCI configuration space of a device before
1818  *		    suspending
1819  * @dev: PCI device that we're dealing with
1820  */
1821 int pci_save_state(struct pci_dev *dev)
1822 {
1823 	int i;
1824 	/* XXX: 100% dword access ok here? */
1825 	for (i = 0; i < 16; i++) {
1826 		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1827 		pci_dbg(dev, "save config %#04x: %#010x\n",
1828 			i * 4, dev->saved_config_space[i]);
1829 	}
1830 	dev->state_saved = true;
1831 
1832 	i = pci_save_pcie_state(dev);
1833 	if (i != 0)
1834 		return i;
1835 
1836 	i = pci_save_pcix_state(dev);
1837 	if (i != 0)
1838 		return i;
1839 
1840 	pci_save_dpc_state(dev);
1841 	pci_save_aer_state(dev);
1842 	pci_save_ptm_state(dev);
1843 	return pci_save_vc_state(dev);
1844 }
1845 EXPORT_SYMBOL(pci_save_state);
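
/*
 * Illustrative pairing for the suspend side (hypothetical dev_pm_ops
 * callback): snapshot config space before cutting power so that
 * pci_restore_state() can rebuild it on resume.
 */
static int foo_suspend_late(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err = pci_save_state(pdev);

	if (err)
		return err;

	return pci_set_power_state(pdev, PCI_D3hot);
}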
1846 
1847 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1848 				     u32 saved_val, int retry, bool force)
1849 {
1850 	u32 val;
1851 
1852 	pci_read_config_dword(pdev, offset, &val);
1853 	if (!force && val == saved_val)
1854 		return;
1855 
1856 	for (;;) {
1857 		pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n",
1858 			offset, val, saved_val);
1859 		pci_write_config_dword(pdev, offset, saved_val);
1860 		if (retry-- <= 0)
1861 			return;
1862 
1863 		pci_read_config_dword(pdev, offset, &val);
1864 		if (val == saved_val)
1865 			return;
1866 
1867 		mdelay(1);
1868 	}
1869 }
1870 
1871 static void pci_restore_config_space_range(struct pci_dev *pdev,
1872 					   int start, int end, int retry,
1873 					   bool force)
1874 {
1875 	int index;
1876 
1877 	for (index = end; index >= start; index--)
1878 		pci_restore_config_dword(pdev, 4 * index,
1879 					 pdev->saved_config_space[index],
1880 					 retry, force);
1881 }
1882 
1883 static void pci_restore_config_space(struct pci_dev *pdev)
1884 {
1885 	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1886 		pci_restore_config_space_range(pdev, 10, 15, 0, false);
1887 		/* Restore BARs before the command register. */
1888 		pci_restore_config_space_range(pdev, 4, 9, 10, false);
1889 		pci_restore_config_space_range(pdev, 0, 3, 0, false);
1890 	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1891 		pci_restore_config_space_range(pdev, 12, 15, 0, false);
1892 
1893 		/*
1894 		 * Force rewriting of prefetch registers to avoid S3 resume
1895 		 * issues on Intel PCI bridges that occur when these
1896 		 * registers are not explicitly written.
1897 		 */
1898 		pci_restore_config_space_range(pdev, 9, 11, 0, true);
1899 		pci_restore_config_space_range(pdev, 0, 8, 0, false);
1900 	} else {
1901 		pci_restore_config_space_range(pdev, 0, 15, 0, false);
1902 	}
1903 }
1904 
1905 static void pci_restore_rebar_state(struct pci_dev *pdev)
1906 {
1907 	unsigned int pos, nbars, i;
1908 	u32 ctrl;
1909 
1910 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1911 	if (!pos)
1912 		return;
1913 
1914 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1915 	nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
1916 
1917 	for (i = 0; i < nbars; i++, pos += 8) {
1918 		struct resource *res;
1919 		int bar_idx, size;
1920 
1921 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1922 		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1923 		res = pdev->resource + bar_idx;
1924 		size = pci_rebar_bytes_to_size(resource_size(res));
1925 		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1926 		ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
1927 		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1928 	}
1929 }
1930 
1931 /**
1932  * pci_restore_state - Restore the saved state of a PCI device
1933  * @dev: PCI device that we're dealing with
1934  */
1935 void pci_restore_state(struct pci_dev *dev)
1936 {
1937 	if (!dev->state_saved)
1938 		return;
1939 
1940 	pci_restore_pcie_state(dev);
1941 	pci_restore_pasid_state(dev);
1942 	pci_restore_pri_state(dev);
1943 	pci_restore_ats_state(dev);
1944 	pci_restore_vc_state(dev);
1945 	pci_restore_rebar_state(dev);
1946 	pci_restore_dpc_state(dev);
1947 	pci_restore_ptm_state(dev);
1948 
1949 	pci_aer_clear_status(dev);
1950 	pci_restore_aer_state(dev);
1951 
1952 	pci_restore_config_space(dev);
1953 
1954 	pci_restore_pcix_state(dev);
1955 	pci_restore_msi_state(dev);
1956 
1957 	/* Restore ACS and IOV configuration state */
1958 	pci_enable_acs(dev);
1959 	pci_restore_iov_state(dev);
1960 
1961 	dev->state_saved = false;
1962 }
1963 EXPORT_SYMBOL(pci_restore_state);
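
/*
 * And the matching resume side (hypothetical callback): bring the device
 * back to D0 first, then replay the snapshot.  pci_restore_state() is a
 * no-op unless a pci_save_state() ran earlier.
 */
static int foo_resume_early(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err = pci_set_power_state(pdev, PCI_D0);

	if (err)
		return err;

	pci_restore_state(pdev);
	return 0;
}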
1964 
1965 struct pci_saved_state {
1966 	u32 config_space[16];
1967 	struct pci_cap_saved_data cap[];
1968 };
1969 
1970 /**
1971  * pci_store_saved_state - Allocate and return an opaque struct containing
1972  *			   the device saved state.
1973  * @dev: PCI device that we're dealing with
1974  *
1975  * Return NULL if no state or error.
1976  */
1977 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1978 {
1979 	struct pci_saved_state *state;
1980 	struct pci_cap_saved_state *tmp;
1981 	struct pci_cap_saved_data *cap;
1982 	size_t size;
1983 
1984 	if (!dev->state_saved)
1985 		return NULL;
1986 
1987 	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1988 
1989 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1990 		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1991 
1992 	state = kzalloc(size, GFP_KERNEL);
1993 	if (!state)
1994 		return NULL;
1995 
1996 	memcpy(state->config_space, dev->saved_config_space,
1997 	       sizeof(state->config_space));
1998 
1999 	cap = state->cap;
2000 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
2001 		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
2002 		memcpy(cap, &tmp->cap, len);
2003 		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
2004 	}
2005 	/* Empty cap_save terminates list */
2006 
2007 	return state;
2008 }
2009 EXPORT_SYMBOL_GPL(pci_store_saved_state);
2010 
2011 /**
2012  * pci_load_saved_state - Reload the provided save state into struct pci_dev.
2013  * @dev: PCI device that we're dealing with
2014  * @state: Saved state returned from pci_store_saved_state()
2015  */
2016 int pci_load_saved_state(struct pci_dev *dev,
2017 			 struct pci_saved_state *state)
2018 {
2019 	struct pci_cap_saved_data *cap;
2020 
2021 	dev->state_saved = false;
2022 
2023 	if (!state)
2024 		return 0;
2025 
2026 	memcpy(dev->saved_config_space, state->config_space,
2027 	       sizeof(state->config_space));
2028 
2029 	cap = state->cap;
2030 	while (cap->size) {
2031 		struct pci_cap_saved_state *tmp;
2032 
2033 		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
2034 		if (!tmp || tmp->cap.size != cap->size)
2035 			return -EINVAL;
2036 
2037 		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
2038 		cap = (struct pci_cap_saved_data *)((u8 *)cap +
2039 		       sizeof(struct pci_cap_saved_data) + cap->size);
2040 	}
2041 
2042 	dev->state_saved = true;
2043 	return 0;
2044 }
2045 EXPORT_SYMBOL_GPL(pci_load_saved_state);
2046 
2047 /**
2048  * pci_load_and_free_saved_state - Reload the save state pointed to by state,
2049  *				   and free the memory allocated for it.
2050  * @dev: PCI device that we're dealing with
2051  * @state: Pointer to saved state returned from pci_store_saved_state()
2052  */
2053 int pci_load_and_free_saved_state(struct pci_dev *dev,
2054 				  struct pci_saved_state **state)
2055 {
2056 	int ret = pci_load_saved_state(dev, *state);
2057 	kfree(*state);
2058 	*state = NULL;
2059 	return ret;
2060 }
2061 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
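
/*
 * Minimal sketch of the store/load pair around a reset, in the style of
 * pass-through users; foo_reset_with_state() is hypothetical.
 */
static int foo_reset_with_state(struct pci_dev *pdev)
{
	struct pci_saved_state *saved;
	int ret;

	pci_save_state(pdev);
	saved = pci_store_saved_state(pdev);	/* private snapshot */

	ret = pci_reset_function(pdev);

	/* Reload the snapshot into @pdev and free it in one call */
	pci_load_and_free_saved_state(pdev, &saved);
	pci_restore_state(pdev);

	return ret;
}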
2062 
2063 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
2064 {
2065 	return pci_enable_resources(dev, bars);
2066 }
2067 
2068 static int do_pci_enable_device(struct pci_dev *dev, int bars)
2069 {
2070 	int err;
2071 	struct pci_dev *bridge;
2072 	u16 cmd;
2073 	u8 pin;
2074 
2075 	err = pci_set_power_state(dev, PCI_D0);
2076 	if (err < 0 && err != -EIO)
2077 		return err;
2078 
2079 	bridge = pci_upstream_bridge(dev);
2080 	if (bridge)
2081 		pcie_aspm_powersave_config_link(bridge);
2082 
2083 	err = pcibios_enable_device(dev, bars);
2084 	if (err < 0)
2085 		return err;
2086 	pci_fixup_device(pci_fixup_enable, dev);
2087 
2088 	if (dev->msi_enabled || dev->msix_enabled)
2089 		return 0;
2090 
2091 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
2092 	if (pin) {
2093 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
2094 		if (cmd & PCI_COMMAND_INTX_DISABLE)
2095 			pci_write_config_word(dev, PCI_COMMAND,
2096 					      cmd & ~PCI_COMMAND_INTX_DISABLE);
2097 	}
2098 
2099 	return 0;
2100 }
2101 
2102 /**
2103  * pci_reenable_device - Resume abandoned device
2104  * @dev: PCI device to be resumed
2105  *
2106  * NOTE: This function is a backend of pci_default_resume() and is not supposed
2107  * to be called by normal code; write a proper resume handler and use that instead.
2108  */
2109 int pci_reenable_device(struct pci_dev *dev)
2110 {
2111 	if (pci_is_enabled(dev))
2112 		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
2113 	return 0;
2114 }
2115 EXPORT_SYMBOL(pci_reenable_device);
2116 
2117 static void pci_enable_bridge(struct pci_dev *dev)
2118 {
2119 	struct pci_dev *bridge;
2120 	int retval;
2121 
2122 	bridge = pci_upstream_bridge(dev);
2123 	if (bridge)
2124 		pci_enable_bridge(bridge);
2125 
2126 	if (pci_is_enabled(dev)) {
2127 		if (!dev->is_busmaster)
2128 			pci_set_master(dev);
2129 		return;
2130 	}
2131 
2132 	retval = pci_enable_device(dev);
2133 	if (retval)
2134 		pci_err(dev, "Error enabling bridge (%d), continuing\n",
2135 			retval);
2136 	pci_set_master(dev);
2137 }
2138 
2139 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
2140 {
2141 	struct pci_dev *bridge;
2142 	int err;
2143 	int i, bars = 0;
2144 
2145 	/*
2146 	 * Power state could be unknown at this point, either due to a fresh
2147 	 * boot or a device removal call.  So get the current power state
2148 	 * so that things like MSI message writing will behave as expected
2149 	 * (e.g. if the device really is in D0 at enable time).
2150 	 */
2151 	pci_update_current_state(dev, dev->current_state);
2152 
2153 	if (atomic_inc_return(&dev->enable_cnt) > 1)
2154 		return 0;		/* already enabled */
2155 
2156 	bridge = pci_upstream_bridge(dev);
2157 	if (bridge)
2158 		pci_enable_bridge(bridge);
2159 
2160 	/* Enable all resources except the SR-IOV related ones */
2161 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
2162 		if (dev->resource[i].flags & flags)
2163 			bars |= (1 << i);
2164 	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
2165 		if (dev->resource[i].flags & flags)
2166 			bars |= (1 << i);
2167 
2168 	err = do_pci_enable_device(dev, bars);
2169 	if (err < 0)
2170 		atomic_dec(&dev->enable_cnt);
2171 	return err;
2172 }
2173 
2174 /**
2175  * pci_enable_device_mem - Initialize a device for use with Memory space
2176  * @dev: PCI device to be initialized
2177  *
2178  * Initialize device before it's used by a driver. Ask low-level code
2179  * to enable Memory resources. Wake up the device if it was suspended.
2180  * Beware, this function can fail.
2181  */
2182 int pci_enable_device_mem(struct pci_dev *dev)
2183 {
2184 	return pci_enable_device_flags(dev, IORESOURCE_MEM);
2185 }
2186 EXPORT_SYMBOL(pci_enable_device_mem);
2187 
2188 /**
2189  * pci_enable_device - Initialize device before it's used by a driver.
2190  * @dev: PCI device to be initialized
2191  *
2192  * Initialize device before it's used by a driver. Ask low-level code
2193  * to enable I/O and memory. Wake up the device if it was suspended.
2194  * Beware, this function can fail.
2195  *
2196  * Note we don't actually enable the device many times if we call
2197  * this function repeatedly (we just increment the count).
2198  */
2199 int pci_enable_device(struct pci_dev *dev)
2200 {
2201 	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2202 }
2203 EXPORT_SYMBOL(pci_enable_device);
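
/*
 * Typical probe-time usage (hypothetical driver; error unwinding of
 * later steps omitted for brevity):
 */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;

	pci_set_master(pdev);	/* enable bus mastering for DMA */
	return 0;
}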
2204 
2205 /**
2206  * pcibios_device_add - provide arch specific hooks when adding device dev
2207  * @dev: the PCI device being added
2208  *
2209  * Permits the platform to provide architecture specific functionality when
2210  * devices are added. This is the default implementation. Architecture
2211  * implementations can override this.
2212  */
2213 int __weak pcibios_device_add(struct pci_dev *dev)
2214 {
2215 	return 0;
2216 }
2217 
2218 /**
2219  * pcibios_release_device - provide arch specific hooks when releasing
2220  *			    device dev
2221  * @dev: the PCI device being released
2222  *
2223  * Permits the platform to provide architecture specific functionality when
2224  * devices are released. This is the default implementation. Architecture
2225  * implementations can override this.
2226  */
2227 void __weak pcibios_release_device(struct pci_dev *dev) {}
2228 
2229 /**
2230  * pcibios_disable_device - disable arch specific PCI resources for device dev
2231  * @dev: the PCI device to disable
2232  *
2233  * Disables architecture specific PCI resources for the device. This
2234  * is the default implementation. Architecture implementations can
2235  * override this.
2236  */
2237 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2238 
2239 static void do_pci_disable_device(struct pci_dev *dev)
2240 {
2241 	u16 pci_command;
2242 
2243 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2244 	if (pci_command & PCI_COMMAND_MASTER) {
2245 		pci_command &= ~PCI_COMMAND_MASTER;
2246 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
2247 	}
2248 
2249 	pcibios_disable_device(dev);
2250 }
2251 
2252 /**
2253  * pci_disable_enabled_device - Disable device without updating enable_cnt
2254  * @dev: PCI device to disable
2255  *
2256  * NOTE: This function is a backend of PCI power management routines and is
2257  * not supposed to be called by drivers.
2258  */
2259 void pci_disable_enabled_device(struct pci_dev *dev)
2260 {
2261 	if (pci_is_enabled(dev))
2262 		do_pci_disable_device(dev);
2263 }
2264 
2265 /**
2266  * pci_disable_device - Disable PCI device after use
2267  * @dev: PCI device to be disabled
2268  *
2269  * Signal to the system that the PCI device is not in use by the system
2270  * anymore.  This only involves disabling PCI bus-mastering, if active.
2271  *
2272  * Note we don't actually disable the device until all callers of
2273  * pci_enable_device() have called pci_disable_device().
2274  */
2275 void pci_disable_device(struct pci_dev *dev)
2276 {
2277 	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2278 		      "disabling already-disabled device");
2279 
2280 	if (atomic_dec_return(&dev->enable_cnt) != 0)
2281 		return;
2282 
2283 	do_pci_disable_device(dev);
2284 
2285 	dev->is_busmaster = 0;
2286 }
2287 EXPORT_SYMBOL(pci_disable_device);
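
/*
 * The enable/disable pair is reference counted, so a remove() path only
 * needs to balance its own probe (hypothetical callback):
 */
static void foo_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);	/* balances foo_probe() above */
}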
2288 
2289 /**
2290  * pcibios_set_pcie_reset_state - set reset state for device dev
2291  * @dev: the PCIe device to reset
2292  * @state: Reset state to enter into
2293  *
2294  * Set the PCIe reset state for the device. This is the default
2295  * implementation. Architecture implementations can override this.
2296  */
2297 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2298 					enum pcie_reset_state state)
2299 {
2300 	return -EINVAL;
2301 }
2302 
2303 /**
2304  * pci_set_pcie_reset_state - set reset state for device dev
2305  * @dev: the PCIe device to reset
2306  * @state: Reset state to enter into
2307  *
2308  * Sets the PCI reset state for the device.
2309  */
2310 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2311 {
2312 	return pcibios_set_pcie_reset_state(dev, state);
2313 }
2314 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2315 
2316 #ifdef CONFIG_PCIEAER
2317 void pcie_clear_device_status(struct pci_dev *dev)
2318 {
2319 	u16 sta;
2320 
2321 	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2322 	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2323 }
2324 #endif
2325 
2326 /**
2327  * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2328  * @dev: PCIe root port or event collector.
2329  */
2330 void pcie_clear_root_pme_status(struct pci_dev *dev)
2331 {
2332 	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2333 }
2334 
2335 /**
2336  * pci_check_pme_status - Check if given device has generated PME.
2337  * @dev: Device to check.
2338  *
2339  * Check the PME status of the device and if set, clear it and clear PME enable
2340  * (if set).  Return 'true' if PME status and PME enable were both set or
2341  * 'false' otherwise.
2342  */
2343 bool pci_check_pme_status(struct pci_dev *dev)
2344 {
2345 	int pmcsr_pos;
2346 	u16 pmcsr;
2347 	bool ret = false;
2348 
2349 	if (!dev->pm_cap)
2350 		return false;
2351 
2352 	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2353 	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2354 	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2355 		return false;
2356 
2357 	/* Clear PME status. */
2358 	pmcsr |= PCI_PM_CTRL_PME_STATUS;
2359 	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2360 		/* Disable PME to avoid interrupt flood. */
2361 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2362 		ret = true;
2363 	}
2364 
2365 	pci_write_config_word(dev, pmcsr_pos, pmcsr);
2366 
2367 	return ret;
2368 }
2369 
2370 /**
2371  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2372  * @dev: Device to handle.
2373  * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2374  *
2375  * Check if @dev has generated PME and queue a resume request for it in that
2376  * case.
2377  */
2378 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2379 {
2380 	if (pme_poll_reset && dev->pme_poll)
2381 		dev->pme_poll = false;
2382 
2383 	if (pci_check_pme_status(dev)) {
2384 		pci_wakeup_event(dev);
2385 		pm_request_resume(&dev->dev);
2386 	}
2387 	return 0;
2388 }
2389 
2390 /**
2391  * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2392  * @bus: Top bus of the subtree to walk.
2393  */
2394 void pci_pme_wakeup_bus(struct pci_bus *bus)
2395 {
2396 	if (bus)
2397 		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2398 }
2399 
2401 /**
2402  * pci_pme_capable - check the capability of PCI device to generate PME#
2403  * @dev: PCI device to handle.
2404  * @state: PCI state from which device will issue PME#.
2405  */
2406 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2407 {
2408 	if (!dev->pm_cap)
2409 		return false;
2410 
2411 	return !!(dev->pme_support & (1 << state));
2412 }
2413 EXPORT_SYMBOL(pci_pme_capable);
2414 
2415 static void pci_pme_list_scan(struct work_struct *work)
2416 {
2417 	struct pci_pme_device *pme_dev, *n;
2418 
2419 	mutex_lock(&pci_pme_list_mutex);
2420 	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2421 		struct pci_dev *pdev = pme_dev->dev;
2422 
2423 		if (pdev->pme_poll) {
2424 			struct pci_dev *bridge = pdev->bus->self;
2425 			struct device *dev = &pdev->dev;
2426 			struct device *bdev = bridge ? &bridge->dev : NULL;
2427 			int bref = 0;
2428 
2429 			/*
2430 			 * If we have a bridge, it should be in an active/D0
2431 			 * state or the configuration space of subordinate
2432 			 * devices may not be accessible or stable over the
2433 			 * course of the call.
2434 			 */
2435 			if (bdev) {
2436 				bref = pm_runtime_get_if_active(bdev);
2437 				if (!bref)
2438 					continue;
2439 
2440 				if (bridge->current_state != PCI_D0)
2441 					goto put_bridge;
2442 			}
2443 
2444 			/*
2445 			 * The device itself should be suspended but config
2446 			 * space must be accessible, therefore it cannot be in
2447 			 * D3cold.
2448 			 */
2449 			if (pm_runtime_suspended(dev) &&
2450 			    pdev->current_state != PCI_D3cold)
2451 				pci_pme_wakeup(pdev, NULL);
2452 
2453 put_bridge:
2454 			if (bref > 0)
2455 				pm_runtime_put(bdev);
2456 		} else {
2457 			list_del(&pme_dev->list);
2458 			kfree(pme_dev);
2459 		}
2460 	}
2461 	if (!list_empty(&pci_pme_list))
2462 		queue_delayed_work(system_freezable_wq, &pci_pme_work,
2463 				   msecs_to_jiffies(PME_TIMEOUT));
2464 	mutex_unlock(&pci_pme_list_mutex);
2465 }
2466 
2467 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2468 {
2469 	u16 pmcsr;
2470 
2471 	if (!dev->pme_support)
2472 		return;
2473 
2474 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2475 	/* Clear PME_Status by writing 1 to it and enable PME# */
2476 	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2477 	if (!enable)
2478 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2479 
2480 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2481 }
2482 
2483 /**
2484  * pci_pme_restore - Restore PME configuration after config space restore.
2485  * @dev: PCI device to update.
2486  */
2487 void pci_pme_restore(struct pci_dev *dev)
2488 {
2489 	u16 pmcsr;
2490 
2491 	if (!dev->pme_support)
2492 		return;
2493 
2494 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2495 	if (dev->wakeup_prepared) {
2496 		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2497 		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2498 	} else {
2499 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2500 		pmcsr |= PCI_PM_CTRL_PME_STATUS;
2501 	}
2502 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2503 }
2504 
2505 /**
2506  * pci_pme_active - enable or disable PCI device's PME# function
2507  * @dev: PCI device to handle.
2508  * @enable: 'true' to enable PME# generation; 'false' to disable it.
2509  *
2510  * The caller must verify that the device is capable of generating PME# before
2511  * calling this function with @enable equal to 'true'.
2512  */
2513 void pci_pme_active(struct pci_dev *dev, bool enable)
2514 {
2515 	__pci_pme_active(dev, enable);
2516 
2517 	/*
2518 	 * PCI (as opposed to PCIe) PME requires that the device have
2519 	 * its PME# line hooked up correctly. Not all hardware vendors
2520 	 * do this, so the PME never gets delivered and the device
2521 	 * remains asleep. The easiest way around this is to
2522 	 * periodically walk the list of suspended devices and check
2523 	 * whether any have their PME flag set. The assumption is that
2524 	 * we'll wake up often enough anyway that this won't be a huge
2525 	 * hit, and the power savings from the devices will still be a
2526 	 * win.
2527 	 *
2528 	 * Although PCIe uses an in-band PME message instead of the PME#
2529 	 * line to report PME, in reality PME does not work for some PCIe
2530 	 * devices.  For example, there are devices that set their PME
2531 	 * status bits, but don't really bother to send a PME message;
2532 	 * there are PCI Express Root Ports that don't bother to
2533 	 * trigger interrupts when they receive PME messages from the
2534 	 * devices below.  So PME poll is used for PCIe devices too.
2535 	 */
2536 
2537 	if (dev->pme_poll) {
2538 		struct pci_pme_device *pme_dev;
2539 		if (enable) {
2540 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
2541 					  GFP_KERNEL);
2542 			if (!pme_dev) {
2543 				pci_warn(dev, "can't enable PME#\n");
2544 				return;
2545 			}
2546 			pme_dev->dev = dev;
2547 			mutex_lock(&pci_pme_list_mutex);
2548 			list_add(&pme_dev->list, &pci_pme_list);
2549 			if (list_is_singular(&pci_pme_list))
2550 				queue_delayed_work(system_freezable_wq,
2551 						   &pci_pme_work,
2552 						   msecs_to_jiffies(PME_TIMEOUT));
2553 			mutex_unlock(&pci_pme_list_mutex);
2554 		} else {
2555 			mutex_lock(&pci_pme_list_mutex);
2556 			list_for_each_entry(pme_dev, &pci_pme_list, list) {
2557 				if (pme_dev->dev == dev) {
2558 					list_del(&pme_dev->list);
2559 					kfree(pme_dev);
2560 					break;
2561 				}
2562 			}
2563 			mutex_unlock(&pci_pme_list_mutex);
2564 		}
2565 	}
2566 
2567 	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2568 }
2569 EXPORT_SYMBOL(pci_pme_active);
2570 
2571 /**
2572  * __pci_enable_wake - enable PCI device as wakeup event source
2573  * @dev: PCI device affected
2574  * @state: PCI state from which device will issue wakeup events
2575  * @enable: True to enable event generation; false to disable
2576  *
2577  * This enables the device as a wakeup event source, or disables it.
2578  * When such events involve platform-specific hooks, those hooks are
2579  * called automatically by this routine.
2580  *
2581  * Devices with legacy power management (no standard PCI PM capabilities)
2582  * always require such platform hooks.
2583  *
2584  * RETURN VALUE:
2585  * 0 is returned on success
2586  * -EINVAL is returned if device is not supposed to wake up the system
2587  * Error code depending on the platform is returned if both the platform and
2588  * the native mechanism fail to enable the generation of wake-up events
2589  */
2590 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2591 {
2592 	int ret = 0;
2593 
2594 	/*
2595 	 * Bridges that are not directly power-manageable only signal
2596 	 * wakeup on behalf of their subordinate devices, which is set up
2597 	 * elsewhere, so skip them.  However, bridges that are
2598 	 * power-manageable may signal wakeup for themselves (for example,
2599 	 * on a hotplug event) and they need to be covered here.
2600 	 */
2601 	if (!pci_power_manageable(dev))
2602 		return 0;
2603 
2604 	/* Don't do the same thing twice in a row for one device. */
2605 	if (!!enable == !!dev->wakeup_prepared)
2606 		return 0;
2607 
2608 	/*
2609 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2610 	 * Anderson we should be doing PME# wake enable followed by ACPI wake
2611 	 * enable.  To disable wake-up we call the platform first, for symmetry.
2612 	 */
2613 
2614 	if (enable) {
2615 		int error;
2616 
2617 		/*
2618 		 * Enable PME signaling if the device can signal PME from
2619 		 * D3cold regardless of whether or not it can signal PME from
2620 		 * the current target state, because that will allow it to
2621 		 * signal PME when the hierarchy above it goes into D3cold and
2622 		 * the device itself ends up in D3cold as a result of that.
2623 		 */
2624 		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2625 			pci_pme_active(dev, true);
2626 		else
2627 			ret = 1;
2628 		error = platform_pci_set_wakeup(dev, true);
2629 		if (ret)
2630 			ret = error;
2631 		if (!ret)
2632 			dev->wakeup_prepared = true;
2633 	} else {
2634 		platform_pci_set_wakeup(dev, false);
2635 		pci_pme_active(dev, false);
2636 		dev->wakeup_prepared = false;
2637 	}
2638 
2639 	return ret;
2640 }
2641 
2642 /**
2643  * pci_enable_wake - change wakeup settings for a PCI device
2644  * @pci_dev: Target device
2645  * @state: PCI state from which device will issue wakeup events
2646  * @enable: Whether or not to enable event generation
2647  *
2648  * If @enable is set, check device_may_wakeup() for the device before calling
2649  * __pci_enable_wake() for it.
2650  */
2651 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2652 {
2653 	if (enable && !device_may_wakeup(&pci_dev->dev))
2654 		return -EINVAL;
2655 
2656 	return __pci_enable_wake(pci_dev, state, enable);
2657 }
2658 EXPORT_SYMBOL(pci_enable_wake);
2659 
2660 /**
2661  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2662  * @dev: PCI device to prepare
2663  * @enable: True to enable wake-up event generation; false to disable
2664  *
2665  * Many drivers want the device to wake up the system from D3_hot or D3_cold
2666  * and this function allows them to set that up cleanly - pci_enable_wake()
2667  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2668  * ordering constraints.
2669  *
2670  * This function only returns error code if the device is not allowed to wake
2671  * up the system from sleep or it is not capable of generating PME# from both
2672  * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2673  */
2674 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2675 {
2676 	return pci_pme_capable(dev, PCI_D3cold) ?
2677 			pci_enable_wake(dev, PCI_D3cold, enable) :
2678 			pci_enable_wake(dev, PCI_D3hot, enable);
2679 }
2680 EXPORT_SYMBOL(pci_wake_from_d3);
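
/*
 * Illustrative Wake-on-LAN style hookup (hypothetical helper): let the
 * device wake the system from whichever D3 state it lands in.
 */
static void foo_set_wol(struct pci_dev *pdev, bool enable)
{
	device_set_wakeup_enable(&pdev->dev, enable);
	pci_wake_from_d3(pdev, enable);
}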
2681 
2682 /**
2683  * pci_target_state - find an appropriate low power state for a given PCI dev
2684  * @dev: PCI device
2685  * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2686  *
2687  * Use underlying platform code to find a supported low power state for @dev.
2688  * If the platform can't manage @dev, return the deepest state from which it
2689  * can generate wake events, based on any available PME info.
2690  */
2691 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2692 {
2693 	if (platform_pci_power_manageable(dev)) {
2694 		/*
2695 		 * Call the platform to find the target state for the device.
2696 		 */
2697 		pci_power_t state = platform_pci_choose_state(dev);
2698 
2699 		switch (state) {
2700 		case PCI_POWER_ERROR:
2701 		case PCI_UNKNOWN:
2702 			return PCI_D3hot;
2703 
2704 		case PCI_D1:
2705 		case PCI_D2:
2706 			if (pci_no_d1d2(dev))
2707 				return PCI_D3hot;
2708 		}
2709 
2710 		return state;
2711 	}
2712 
2713 	/*
2714 	 * If the device is in D3cold even though it's not power-manageable by
2715 	 * the platform, it may have been powered down by non-standard means.
2716 	 * Best to let it slumber.
2717 	 */
2718 	if (dev->current_state == PCI_D3cold)
2719 		return PCI_D3cold;
2720 	else if (!dev->pm_cap)
2721 		return PCI_D0;
2722 
2723 	if (wakeup && dev->pme_support) {
2724 		pci_power_t state = PCI_D3hot;
2725 
2726 		/*
2727 		 * Find the deepest state from which the device can generate
2728 		 * PME#.
2729 		 */
2730 		while (state && !(dev->pme_support & (1 << state)))
2731 			state--;
2732 
2733 		if (state)
2734 			return state;
2735 		else if (dev->pme_support & 1)
2736 			return PCI_D0;
2737 	}
2738 
2739 	return PCI_D3hot;
2740 }
2741 
2742 /**
2743  * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2744  *			  into a sleep state
2745  * @dev: Device to handle.
2746  *
2747  * Choose the power state appropriate for the device depending on whether
2748  * it can wake up the system and/or is power manageable by the platform
2749  * (PCI_D3hot is the default) and put the device into that state.
2750  */
2751 int pci_prepare_to_sleep(struct pci_dev *dev)
2752 {
2753 	bool wakeup = device_may_wakeup(&dev->dev);
2754 	pci_power_t target_state = pci_target_state(dev, wakeup);
2755 	int error;
2756 
2757 	if (target_state == PCI_POWER_ERROR)
2758 		return -EIO;
2759 
2760 	pci_enable_wake(dev, target_state, wakeup);
2761 
2762 	error = pci_set_power_state(dev, target_state);
2763 
2764 	if (error)
2765 		pci_enable_wake(dev, target_state, false);
2766 
2767 	return error;
2768 }
2769 EXPORT_SYMBOL(pci_prepare_to_sleep);
2770 
2771 /**
2772  * pci_back_from_sleep - turn PCI device on during system-wide transition
2773  *			 into working state
2774  * @dev: Device to handle.
2775  *
2776  * Disable device's system wake-up capability and put it into D0.
2777  */
2778 int pci_back_from_sleep(struct pci_dev *dev)
2779 {
2780 	int ret = pci_set_power_state(dev, PCI_D0);
2781 
2782 	if (ret)
2783 		return ret;
2784 
2785 	pci_enable_wake(dev, PCI_D0, false);
2786 	return 0;
2787 }
2788 EXPORT_SYMBOL(pci_back_from_sleep);
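
/*
 * The two helpers above pair naturally across a system sleep transition.
 * A minimal sketch, assuming the caller wants the core's default
 * target-state policy (hypothetical callbacks):
 */
static int foo_sys_suspend(struct pci_dev *pdev)
{
	return pci_prepare_to_sleep(pdev);
}

static int foo_sys_resume(struct pci_dev *pdev)
{
	return pci_back_from_sleep(pdev);
}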
2789 
2790 /**
2791  * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2792  * @dev: PCI device being suspended.
2793  *
2794  * Prepare @dev to generate wake-up events at run time and put it into a low
2795  * power state.
2796  */
2797 int pci_finish_runtime_suspend(struct pci_dev *dev)
2798 {
2799 	pci_power_t target_state;
2800 	int error;
2801 
2802 	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2803 	if (target_state == PCI_POWER_ERROR)
2804 		return -EIO;
2805 
2806 	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2807 
2808 	error = pci_set_power_state(dev, target_state);
2809 
2810 	if (error)
2811 		pci_enable_wake(dev, target_state, false);
2812 
2813 	return error;
2814 }
2815 
2816 /**
2817  * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2818  * @dev: Device to check.
2819  *
2820  * Return true if the device itself is capable of generating wake-up events
2821  * (through the platform or using the native PCIe PME) or if the device supports
2822  * PME and one of its upstream bridges can generate wake-up events.
2823  */
2824 bool pci_dev_run_wake(struct pci_dev *dev)
2825 {
2826 	struct pci_bus *bus = dev->bus;
2827 
2828 	if (!dev->pme_support)
2829 		return false;
2830 
2831 	/* PME-capable in principle, but not from the target power state */
2832 	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2833 		return false;
2834 
2835 	if (device_can_wakeup(&dev->dev))
2836 		return true;
2837 
2838 	while (bus->parent) {
2839 		struct pci_dev *bridge = bus->self;
2840 
2841 		if (device_can_wakeup(&bridge->dev))
2842 			return true;
2843 
2844 		bus = bus->parent;
2845 	}
2846 
2847 	/* We have reached the root bus. */
2848 	if (bus->bridge)
2849 		return device_can_wakeup(bus->bridge);
2850 
2851 	return false;
2852 }
2853 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2854 
2855 /**
2856  * pci_dev_need_resume - Check if it is necessary to resume the device.
2857  * @pci_dev: Device to check.
2858  *
2859  * Return 'true' if the device is not runtime-suspended, if it has to be
2860  * reconfigured due to a wakeup settings difference between system and runtime
2861  * suspend, or if its current power state is not suitable for the upcoming
2862  * (system-wide) transition.
2863  */
2864 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2865 {
2866 	struct device *dev = &pci_dev->dev;
2867 	pci_power_t target_state;
2868 
2869 	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2870 		return true;
2871 
2872 	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2873 
2874 	/*
2875 	 * If the earlier platform check has not triggered, D3cold is just power
2876 	 * removal on top of D3hot, so no need to resume the device in that
2877 	 * case.
2878 	 */
2879 	return target_state != pci_dev->current_state &&
2880 		target_state != PCI_D3cold &&
2881 		pci_dev->current_state != PCI_D3hot;
2882 }
2883 
2884 /**
2885  * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2886  * @pci_dev: Device to check.
2887  *
2888  * If the device is suspended and it is not configured for system wakeup,
2889  * disable PME for it to prevent it from waking up the system unnecessarily.
2890  *
2891  * Note that if the device's power state is D3cold and the platform check in
2892  * pci_dev_need_resume() has not triggered, the device's configuration need not
2893  * be changed.
2894  */
2895 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2896 {
2897 	struct device *dev = &pci_dev->dev;
2898 
2899 	spin_lock_irq(&dev->power.lock);
2900 
2901 	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2902 	    pci_dev->current_state < PCI_D3cold)
2903 		__pci_pme_active(pci_dev, false);
2904 
2905 	spin_unlock_irq(&dev->power.lock);
2906 }
2907 
2908 /**
2909  * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2910  * @pci_dev: Device to handle.
2911  *
2912  * If the device is runtime suspended and wakeup-capable, enable PME for it as
2913  * it might have been disabled during the prepare phase of system suspend if
2914  * the device was not configured for system wakeup.
2915  */
2916 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2917 {
2918 	struct device *dev = &pci_dev->dev;
2919 
2920 	if (!pci_dev_run_wake(pci_dev))
2921 		return;
2922 
2923 	spin_lock_irq(&dev->power.lock);
2924 
2925 	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2926 		__pci_pme_active(pci_dev, true);
2927 
2928 	spin_unlock_irq(&dev->power.lock);
2929 }
2930 
2931 /**
2932  * pci_choose_state - Choose the power state of a PCI device.
2933  * @dev: Target PCI device.
2934  * @state: Target state for the whole system.
2935  *
2936  * Returns PCI power state suitable for @dev and @state.
2937  */
2938 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
2939 {
2940 	if (state.event == PM_EVENT_ON)
2941 		return PCI_D0;
2942 
2943 	return pci_target_state(dev, false);
2944 }
2945 EXPORT_SYMBOL(pci_choose_state);
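
/*
 * pci_choose_state() is the classic legacy .suspend pattern (hypothetical
 * callback; modern drivers use dev_pm_ops instead):
 */
static int foo_legacy_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
	return 0;
}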
2946 
2947 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2948 {
2949 	struct device *dev = &pdev->dev;
2950 	struct device *parent = dev->parent;
2951 
2952 	if (parent)
2953 		pm_runtime_get_sync(parent);
2954 	pm_runtime_get_noresume(dev);
2955 	/*
2956 	 * pdev->current_state is set to PCI_D3cold during suspending,
2957 	 * so wait until suspending completes
2958 	 */
2959 	pm_runtime_barrier(dev);
2960 	/*
2961 	 * Only need to resume devices in D3cold, because config
2962 	 * registers are still accessible for devices suspended but
2963 	 * not in D3cold.
2964 	 */
2965 	if (pdev->current_state == PCI_D3cold)
2966 		pm_runtime_resume(dev);
2967 }
2968 
2969 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2970 {
2971 	struct device *dev = &pdev->dev;
2972 	struct device *parent = dev->parent;
2973 
2974 	pm_runtime_put(dev);
2975 	if (parent)
2976 		pm_runtime_put_sync(parent);
2977 }
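
/*
 * These two helpers bracket config space accesses that may race with
 * runtime PM (e.g. from sysfs), resuming a D3cold device first.  A
 * minimal sketch of the pattern (hypothetical helper):
 */
static u16 foo_read_vendor_id(struct pci_dev *pdev)
{
	u16 vendor;

	pci_config_pm_runtime_get(pdev);
	pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
	pci_config_pm_runtime_put(pdev);

	return vendor;
}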
2978 
2979 static const struct dmi_system_id bridge_d3_blacklist[] = {
2980 #ifdef CONFIG_X86
2981 	{
2982 		/*
2983 		 * Gigabyte X299 root port is not marked as hotplug capable,
2984 		 * which allows Linux to power manage it.  However, this
2985 		 * confuses the BIOS SMI handler so don't power manage root
2986 		 * ports on that system.
2987 		 */
2988 		.ident = "X299 DESIGNARE EX-CF",
2989 		.matches = {
2990 			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2991 			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2992 		},
2993 	},
2994 	{
2995 		/*
2996 		 * Downstream device is not accessible after putting a root port
2997 		 * into D3cold and back into D0 on Elo Continental Z2 board
2998 		 */
2999 		.ident = "Elo Continental Z2",
3000 		.matches = {
3001 			DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
3002 			DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
3003 			DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
3004 		},
3005 	},
3006 	{
3007 		/*
3008 		 * Changing the power state of the root port a dGPU is connected to fails
3009 		 * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
3010 		 */
3011 		.ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
3012 		.matches = {
3013 			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
3014 			DMI_MATCH(DMI_BOARD_NAME, "1972"),
3015 			DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
3016 		},
3017 	},
3018 #endif
3019 	{ }
3020 };
3021 
3022 /**
3023  * pci_bridge_d3_possible - Is it possible to put the bridge into D3
3024  * @bridge: Bridge to check
3025  *
3026  * This function checks if it is possible to move the bridge to D3.
3027  * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
3028  */
3029 bool pci_bridge_d3_possible(struct pci_dev *bridge)
3030 {
3031 	if (!pci_is_pcie(bridge))
3032 		return false;
3033 
3034 	switch (pci_pcie_type(bridge)) {
3035 	case PCI_EXP_TYPE_ROOT_PORT:
3036 	case PCI_EXP_TYPE_UPSTREAM:
3037 	case PCI_EXP_TYPE_DOWNSTREAM:
3038 		if (pci_bridge_d3_disable)
3039 			return false;
3040 
3041 		/*
3042 		 * Hotplug ports handled by firmware in System Management Mode
3043 		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
3044 		 */
3045 		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
3046 			return false;
3047 
3048 		if (pci_bridge_d3_force)
3049 			return true;
3050 
3051 		/* Even the oldest 2010 Thunderbolt controller supports D3. */
3052 		if (bridge->is_thunderbolt)
3053 			return true;
3054 
3055 		/* Platform might know better if the bridge supports D3 */
3056 		if (platform_pci_bridge_d3(bridge))
3057 			return true;
3058 
3059 		/*
3060 		 * Hotplug ports handled natively by the OS were not validated
3061 		 * by vendors for runtime D3 at least until 2018 because there
3062 		 * was no OS support.
3063 		 */
3064 		if (bridge->is_hotplug_bridge)
3065 			return false;
3066 
3067 		if (dmi_check_system(bridge_d3_blacklist))
3068 			return false;
3069 
3070 		/*
3071 		 * It should be safe to put PCIe ports from 2015 or newer
3072 		 * to D3.
3073 		 */
3074 		if (dmi_get_bios_year() >= 2015)
3075 			return true;
3076 		break;
3077 	}
3078 
3079 	return false;
3080 }
3081 
3082 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
3083 {
3084 	bool *d3cold_ok = data;
3085 
3086 	if (/* The device needs to be allowed to go D3cold ... */
3087 	    dev->no_d3cold || !dev->d3cold_allowed ||
3088 
3089 	    /* ... and if it is wakeup capable to do so from D3cold. */
3090 	    (device_may_wakeup(&dev->dev) &&
3091 	     !pci_pme_capable(dev, PCI_D3cold)) ||
3092 
3093 	    /* If it is a bridge it must be allowed to go to D3. */
3094 	    !pci_power_manageable(dev))
3095 
3096 		*d3cold_ok = false;
3097 
3098 	return !*d3cold_ok;
3099 }
3100 
3101 /**
3102  * pci_bridge_d3_update - Update bridge D3 capabilities
3103  * @dev: PCI device which is changed
3104  *
3105  * Update upstream bridge PM capabilities accordingly depending on if the
3106  * device PM configuration was changed or the device is being removed.  The
3107  * change is also propagated upstream.
3108  */
3109 void pci_bridge_d3_update(struct pci_dev *dev)
3110 {
3111 	bool remove = !device_is_registered(&dev->dev);
3112 	struct pci_dev *bridge;
3113 	bool d3cold_ok = true;
3114 
3115 	bridge = pci_upstream_bridge(dev);
3116 	if (!bridge || !pci_bridge_d3_possible(bridge))
3117 		return;
3118 
3119 	/*
3120 	 * If D3 is currently allowed for the bridge, removing one of its
3121 	 * children won't change that.
3122 	 */
3123 	if (remove && bridge->bridge_d3)
3124 		return;
3125 
3126 	/*
3127 	 * If D3 is currently allowed for the bridge and a child is added or
3128 	 * changed, disallowance of D3 can only be caused by that child, so
3129 	 * we only need to check that single device, not any of its siblings.
3130 	 *
3131 	 * If D3 is currently not allowed for the bridge, checking the device
3132 	 * first may allow us to skip checking its siblings.
3133 	 */
3134 	if (!remove)
3135 		pci_dev_check_d3cold(dev, &d3cold_ok);
3136 
3137 	/*
3138 	 * If D3 is currently not allowed for the bridge, this may be caused
3139 	 * either by the device being changed/removed or any of its siblings,
3140 	 * so we need to go through all children to find out if one of them
3141 	 * continues to block D3.
3142 	 */
3143 	if (d3cold_ok && !bridge->bridge_d3)
3144 		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3145 			     &d3cold_ok);
3146 
3147 	if (bridge->bridge_d3 != d3cold_ok) {
3148 		bridge->bridge_d3 = d3cold_ok;
3149 		/* Propagate change to upstream bridges */
3150 		pci_bridge_d3_update(bridge);
3151 	}
3152 }
3153 
3154 /**
3155  * pci_d3cold_enable - Enable D3cold for device
3156  * @dev: PCI device to handle
3157  *
3158  * This function can be used in drivers to enable D3cold from the device
3159  * they handle.  It also updates upstream PCI bridge PM capabilities
3160  * accordingly.
3161  */
3162 void pci_d3cold_enable(struct pci_dev *dev)
3163 {
3164 	if (dev->no_d3cold) {
3165 		dev->no_d3cold = false;
3166 		pci_bridge_d3_update(dev);
3167 	}
3168 }
3169 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3170 
3171 /**
3172  * pci_d3cold_disable - Disable D3cold for device
3173  * @dev: PCI device to handle
3174  *
3175  * This function can be used in drivers to disable D3cold from the device
3176  * they handle.  It also updates upstream PCI bridge PM capabilities
3177  * accordingly.
3178  */
3179 void pci_d3cold_disable(struct pci_dev *dev)
3180 {
3181 	if (!dev->no_d3cold) {
3182 		dev->no_d3cold = true;
3183 		pci_bridge_d3_update(dev);
3184 	}
3185 }
3186 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
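
/*
 * Hypothetical quirk-style use: a driver that has found its device does
 * not survive D3cold can veto it at probe time.
 */
static void foo_apply_d3cold_quirk(struct pci_dev *pdev)
{
	if (pdev->revision == 0x01)	/* hypothetical broken revision */
		pci_d3cold_disable(pdev);
}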
3187 
3188 /**
3189  * pci_pm_init - Initialize PM functions of given PCI device
3190  * @dev: PCI device to handle.
3191  */
3192 void pci_pm_init(struct pci_dev *dev)
3193 {
3194 	int pm;
3195 	u16 status;
3196 	u16 pmc;
3197 
3198 	pm_runtime_forbid(&dev->dev);
3199 	pm_runtime_set_active(&dev->dev);
3200 	pm_runtime_enable(&dev->dev);
3201 	device_enable_async_suspend(&dev->dev);
3202 	dev->wakeup_prepared = false;
3203 
3204 	dev->pm_cap = 0;
3205 	dev->pme_support = 0;
3206 
3207 	/* find PCI PM capability in list */
3208 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3209 	if (!pm)
3210 		return;
3211 	/* Check device's ability to generate PME# */
3212 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3213 
3214 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3215 		pci_err(dev, "unsupported PM cap regs version (%u)\n",
3216 			pmc & PCI_PM_CAP_VER_MASK);
3217 		return;
3218 	}
3219 
3220 	dev->pm_cap = pm;
3221 	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3222 	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3223 	dev->bridge_d3 = pci_bridge_d3_possible(dev);
3224 	dev->d3cold_allowed = true;
3225 
3226 	dev->d1_support = false;
3227 	dev->d2_support = false;
3228 	if (!pci_no_d1d2(dev)) {
3229 		if (pmc & PCI_PM_CAP_D1)
3230 			dev->d1_support = true;
3231 		if (pmc & PCI_PM_CAP_D2)
3232 			dev->d2_support = true;
3233 
3234 		if (dev->d1_support || dev->d2_support)
3235 			pci_info(dev, "supports%s%s\n",
3236 				   dev->d1_support ? " D1" : "",
3237 				   dev->d2_support ? " D2" : "");
3238 	}
3239 
3240 	pmc &= PCI_PM_CAP_PME_MASK;
3241 	if (pmc) {
3242 		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3243 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3244 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3245 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3246 			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3247 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3248 		dev->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
3249 		dev->pme_poll = true;
3250 		/*
3251 		 * Make device's PM flags reflect the wake-up capability, but
3252 		 * let the user space enable it to wake up the system as needed.
3253 		 */
3254 		device_set_wakeup_capable(&dev->dev, true);
3255 		/* Disable the PME# generation functionality */
3256 		pci_pme_active(dev, false);
3257 	}
3258 
3259 	pci_read_config_word(dev, PCI_STATUS, &status);
3260 	if (status & PCI_STATUS_IMM_READY)
3261 		dev->imm_ready = 1;
3262 }
3263 
3264 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3265 {
3266 	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3267 
3268 	switch (prop) {
3269 	case PCI_EA_P_MEM:
3270 	case PCI_EA_P_VF_MEM:
3271 		flags |= IORESOURCE_MEM;
3272 		break;
3273 	case PCI_EA_P_MEM_PREFETCH:
3274 	case PCI_EA_P_VF_MEM_PREFETCH:
3275 		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3276 		break;
3277 	case PCI_EA_P_IO:
3278 		flags |= IORESOURCE_IO;
3279 		break;
3280 	default:
3281 		return 0;
3282 	}
3283 
3284 	return flags;
3285 }
3286 
3287 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3288 					    u8 prop)
3289 {
3290 	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3291 		return &dev->resource[bei];
3292 #ifdef CONFIG_PCI_IOV
3293 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3294 		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3295 		return &dev->resource[PCI_IOV_RESOURCES +
3296 				      bei - PCI_EA_BEI_VF_BAR0];
3297 #endif
3298 	else if (bei == PCI_EA_BEI_ROM)
3299 		return &dev->resource[PCI_ROM_RESOURCE];
3300 	else
3301 		return NULL;
3302 }
3303 
3304 /* Read an Enhanced Allocation (EA) entry */
3305 static int pci_ea_read(struct pci_dev *dev, int offset)
3306 {
3307 	struct resource *res;
3308 	const char *res_name;
3309 	int ent_size, ent_offset = offset;
3310 	resource_size_t start, end;
3311 	unsigned long flags;
3312 	u32 dw0, bei, base, max_offset;
3313 	u8 prop;
3314 	bool support_64 = (sizeof(resource_size_t) >= 8);
3315 
3316 	pci_read_config_dword(dev, ent_offset, &dw0);
3317 	ent_offset += 4;
3318 
3319 	/* Entry size field indicates DWORDs after 1st */
3320 	ent_size = (FIELD_GET(PCI_EA_ES, dw0) + 1) << 2;
3321 
3322 	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3323 		goto out;
3324 
3325 	bei = FIELD_GET(PCI_EA_BEI, dw0);
3326 	prop = FIELD_GET(PCI_EA_PP, dw0);
3327 
3328 	/*
3329 	 * If the Property is in the reserved range, try the Secondary
3330 	 * Property instead.
3331 	 */
3332 	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3333 		prop = FIELD_GET(PCI_EA_SP, dw0);
3334 	if (prop > PCI_EA_P_BRIDGE_IO)
3335 		goto out;
3336 
3337 	res = pci_ea_get_resource(dev, bei, prop);
3338 	res_name = pci_resource_name(dev, bei);
3339 	if (!res) {
3340 		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3341 		goto out;
3342 	}
3343 
3344 	flags = pci_ea_flags(dev, prop);
3345 	if (!flags) {
3346 		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3347 		goto out;
3348 	}
3349 
3350 	/* Read Base */
3351 	pci_read_config_dword(dev, ent_offset, &base);
3352 	start = (base & PCI_EA_FIELD_MASK);
3353 	ent_offset += 4;
3354 
3355 	/* Read MaxOffset */
3356 	pci_read_config_dword(dev, ent_offset, &max_offset);
3357 	ent_offset += 4;
3358 
3359 	/* Read Base MSBs (if 64-bit entry) */
3360 	if (base & PCI_EA_IS_64) {
3361 		u32 base_upper;
3362 
3363 		pci_read_config_dword(dev, ent_offset, &base_upper);
3364 		ent_offset += 4;
3365 
3366 		flags |= IORESOURCE_MEM_64;
3367 
3368 		/* entry starts above 32-bit boundary, can't use */
3369 		if (!support_64 && base_upper)
3370 			goto out;
3371 
3372 		if (support_64)
3373 			start |= ((u64)base_upper << 32);
3374 	}
3375 
3376 	end = start + (max_offset | 0x03);
3377 
3378 	/* Read MaxOffset MSBs (if 64-bit entry) */
3379 	if (max_offset & PCI_EA_IS_64) {
3380 		u32 max_offset_upper;
3381 
3382 		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3383 		ent_offset += 4;
3384 
3385 		flags |= IORESOURCE_MEM_64;
3386 
3387 		/* entry too big, can't use */
3388 		if (!support_64 && max_offset_upper)
3389 			goto out;
3390 
3391 		if (support_64)
3392 			end += ((u64)max_offset_upper << 32);
3393 	}
3394 
3395 	if (end < start) {
3396 		pci_err(dev, "EA Entry crosses address boundary\n");
3397 		goto out;
3398 	}
3399 
3400 	if (ent_size != ent_offset - offset) {
3401 		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3402 			ent_size, ent_offset - offset);
3403 		goto out;
3404 	}
3405 
3406 	res->name = pci_name(dev);
3407 	res->start = start;
3408 	res->end = end;
3409 	res->flags = flags;
3410 
3411 	if (bei <= PCI_EA_BEI_BAR5)
3412 		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3413 			 res_name, res, prop);
3414 	else if (bei == PCI_EA_BEI_ROM)
3415 		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3416 			 res_name, res, prop);
3417 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3418 		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3419 			 res_name, res, prop);
3420 	else
3421 		pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n",
3422 			   bei, res, prop);
3423 
3424 out:
3425 	return offset + ent_size;
3426 }
3427 
3428 /* Enhanced Allocation Initialization */
3429 void pci_ea_init(struct pci_dev *dev)
3430 {
3431 	int ea;
3432 	u8 num_ent;
3433 	int offset;
3434 	int i;
3435 
3436 	/* find PCI EA capability in list */
3437 	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3438 	if (!ea)
3439 		return;
3440 
3441 	/* determine the number of entries */
3442 	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3443 					&num_ent);
3444 	num_ent &= PCI_EA_NUM_ENT_MASK;
3445 
3446 	offset = ea + PCI_EA_FIRST_ENT;
3447 
3448 	/* Skip DWORD 2 for type 1 functions */
3449 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3450 		offset += 4;
3451 
3452 	/* parse each EA entry */
3453 	for (i = 0; i < num_ent; ++i)
3454 		offset = pci_ea_read(dev, offset);
3455 }
3456 
3457 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3458 	struct pci_cap_saved_state *new_cap)
3459 {
3460 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3461 }
3462 
3463 /**
3464  * _pci_add_cap_save_buffer - allocate buffer for saving given
3465  *			      capability registers
3466  * @dev: the PCI device
3467  * @cap: the capability to allocate the buffer for
3468  * @extended: Standard or Extended capability ID
3469  * @size: requested size of the buffer
3470  */
3471 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3472 				    bool extended, unsigned int size)
3473 {
3474 	int pos;
3475 	struct pci_cap_saved_state *save_state;
3476 
3477 	if (extended)
3478 		pos = pci_find_ext_capability(dev, cap);
3479 	else
3480 		pos = pci_find_capability(dev, cap);
3481 
3482 	if (!pos)
3483 		return 0;
3484 
3485 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3486 	if (!save_state)
3487 		return -ENOMEM;
3488 
3489 	save_state->cap.cap_nr = cap;
3490 	save_state->cap.cap_extended = extended;
3491 	save_state->cap.size = size;
3492 	pci_add_saved_cap(dev, save_state);
3493 
3494 	return 0;
3495 }
3496 
3497 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3498 {
3499 	return _pci_add_cap_save_buffer(dev, cap, false, size);
3500 }
3501 
3502 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3503 {
3504 	return _pci_add_cap_save_buffer(dev, cap, true, size);
3505 }
3506 
3507 /**
3508  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3509  * @dev: the PCI device
3510  */
3511 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3512 {
3513 	int error;
3514 
3515 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3516 					PCI_EXP_SAVE_REGS * sizeof(u16));
3517 	if (error)
3518 		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3519 
3520 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3521 	if (error)
3522 		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3523 
3524 	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3525 					    2 * sizeof(u16));
3526 	if (error)
3527 		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3528 
3529 	pci_allocate_vc_save_buffers(dev);
3530 }
3531 
3532 void pci_free_cap_save_buffers(struct pci_dev *dev)
3533 {
3534 	struct pci_cap_saved_state *tmp;
3535 	struct hlist_node *n;
3536 
3537 	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3538 		kfree(tmp);
3539 }
3540 
3541 /**
3542  * pci_configure_ari - enable or disable ARI forwarding
3543  * @dev: the PCI device
3544  *
3545  * If @dev and its upstream bridge both support ARI, enable ARI in the
3546  * bridge.  Otherwise, disable ARI in the bridge.
3547  */
3548 void pci_configure_ari(struct pci_dev *dev)
3549 {
3550 	u32 cap;
3551 	struct pci_dev *bridge;
3552 
3553 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3554 		return;
3555 
3556 	bridge = dev->bus->self;
3557 	if (!bridge)
3558 		return;
3559 
3560 	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3561 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
3562 		return;
3563 
3564 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3565 		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3566 					 PCI_EXP_DEVCTL2_ARI);
3567 		bridge->ari_enabled = 1;
3568 	} else {
3569 		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3570 					   PCI_EXP_DEVCTL2_ARI);
3571 		bridge->ari_enabled = 0;
3572 	}
3573 }
3574 
3575 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3576 {
3577 	int pos;
3578 	u16 cap, ctrl;
3579 
3580 	pos = pdev->acs_cap;
3581 	if (!pos)
3582 		return false;
3583 
3584 	/*
3585 	 * Except for egress control, capabilities are either required
3586 	 * or only required if controllable.  Features missing from the
3587 	 * capability field can therefore be assumed to be hard-wired enabled.
3588 	 */
3589 	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3590 	acs_flags &= (cap | PCI_ACS_EC);
3591 
3592 	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3593 	return (ctrl & acs_flags) == acs_flags;
3594 }
3595 
3596 /**
3597  * pci_acs_enabled - test ACS against required flags for a given device
3598  * @pdev: device to test
3599  * @acs_flags: required PCI ACS flags
3600  *
3601  * Return true if the device supports the provided flags.  Automatically
3602  * filters out flags that are not implemented on multifunction devices.
3603  *
3604  * Note that this interface checks the effective ACS capabilities of the
3605  * device rather than the actual capabilities.  For instance, most single
3606  * function endpoints are not required to support ACS because they have no
3607  * opportunity for peer-to-peer access.  We therefore return 'true'
3608  * regardless of whether the device exposes an ACS capability.  This makes
3609  * it much easier for callers of this function to ignore the actual type
3610  * or topology of the device when testing ACS support.
3611  */
3612 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3613 {
3614 	int ret;
3615 
3616 	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3617 	if (ret >= 0)
3618 		return ret > 0;
3619 
3620 	/*
3621 	 * Conventional PCI and PCI-X devices never support ACS, either
3622 	 * effectively or actually.  The shared bus topology implies that
3623 	 * any device on the bus can receive or snoop DMA.
3624 	 */
3625 	if (!pci_is_pcie(pdev))
3626 		return false;
3627 
3628 	switch (pci_pcie_type(pdev)) {
3629 	/*
3630 	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3631 	 * but since their primary interface is PCI/X, we conservatively
3632 	 * handle them as we would a non-PCIe device.
3633 	 */
3634 	case PCI_EXP_TYPE_PCIE_BRIDGE:
3635 	/*
3636 	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
3637 	 * applicable... must never implement an ACS Extended Capability...".
3638 	 * This seems arbitrary, but we take a conservative interpretation
3639 	 * of this statement.
3640 	 */
3641 	case PCI_EXP_TYPE_PCI_BRIDGE:
3642 	case PCI_EXP_TYPE_RC_EC:
3643 		return false;
3644 	/*
3645 	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3646 	 * implement ACS in order to indicate their peer-to-peer capabilities,
3647 	 * regardless of whether they are single- or multi-function devices.
3648 	 */
3649 	case PCI_EXP_TYPE_DOWNSTREAM:
3650 	case PCI_EXP_TYPE_ROOT_PORT:
3651 		return pci_acs_flags_enabled(pdev, acs_flags);
3652 	/*
3653 	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3654 	 * implemented by the remaining PCIe types to indicate peer-to-peer
3655 	 * capabilities, but only when they are part of a multifunction
3656 	 * device.  The footnote for section 6.12 indicates the specific
3657 	 * PCIe types included here.
3658 	 */
3659 	case PCI_EXP_TYPE_ENDPOINT:
3660 	case PCI_EXP_TYPE_UPSTREAM:
3661 	case PCI_EXP_TYPE_LEG_END:
3662 	case PCI_EXP_TYPE_RC_END:
3663 		if (!pdev->multifunction)
3664 			break;
3665 
3666 		return pci_acs_flags_enabled(pdev, acs_flags);
3667 	}
3668 
3669 	/*
3670 	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3671 	 * to single function devices with the exception of downstream ports.
3672 	 */
3673 	return true;
3674 }
3675 
3676 /**
3677  * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3678  * @start: starting downstream device
3679  * @end: ending upstream device or NULL to search to the root bus
3680  * @acs_flags: required flags
3681  *
3682  * Walk up a device tree from start to end testing PCI ACS support.  If
3683  * any step along the way does not support the required flags, return false.
3684  */
3685 bool pci_acs_path_enabled(struct pci_dev *start,
3686 			  struct pci_dev *end, u16 acs_flags)
3687 {
3688 	struct pci_dev *pdev, *parent = start;
3689 
3690 	do {
3691 		pdev = parent;
3692 
3693 		if (!pci_acs_enabled(pdev, acs_flags))
3694 			return false;
3695 
3696 		if (pci_is_root_bus(pdev->bus))
3697 			return (end == NULL);
3698 
3699 		parent = pdev->bus->self;
3700 	} while (pdev != end);
3701 
3702 	return true;
3703 }
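
/*
 * Example (editorial sketch, not part of the original file): a caller
 * such as an IOMMU or VFIO layer might use pci_acs_path_enabled() to
 * verify that peer-to-peer TLPs from a device are always redirected
 * upstream before handing it to userspace.  The flag set below is a
 * common choice for such checks, not a requirement mandated here.
 */
static bool example_dev_is_isolated(struct pci_dev *pdev)
{
	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CF | PCI_ACS_UF;

	/* NULL @end means "test every bridge up to the root bus" */
	return pci_acs_path_enabled(pdev, NULL, flags);
}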
3704 
3705 /**
3706  * pci_acs_init - Initialize ACS if hardware supports it
3707  * @dev: the PCI device
3708  */
3709 void pci_acs_init(struct pci_dev *dev)
3710 {
3711 	dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3712 
3713 	/*
3714 	 * Attempt to enable ACS regardless of capability because some Root
3715 	 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3716 	 * the standard ACS capability but still support ACS via those
3717 	 * quirks.
3718 	 */
3719 	pci_enable_acs(dev);
3720 }
3721 
3722 /**
3723  * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3724  * @pdev: PCI device
3725  * @bar: BAR to find
3726  *
3727  * Helper to find the position of the ctrl register for a BAR.
3728  * Returns -ENOTSUPP if resizable BARs are not supported at all.
3729  * Returns -ENOENT if no ctrl register for the BAR could be found.
3730  */
3731 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3732 {
3733 	unsigned int pos, nbars, i;
3734 	u32 ctrl;
3735 
3736 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3737 	if (!pos)
3738 		return -ENOTSUPP;
3739 
3740 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3741 	nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
3742 
3743 	for (i = 0; i < nbars; i++, pos += 8) {
3744 		int bar_idx;
3745 
3746 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3747 		bar_idx = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, ctrl);
3748 		if (bar_idx == bar)
3749 			return pos;
3750 	}
3751 
3752 	return -ENOENT;
3753 }
3754 
3755 /**
3756  * pci_rebar_get_possible_sizes - get possible sizes for BAR
3757  * @pdev: PCI device
3758  * @bar: BAR to query
3759  *
3760  * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3761  * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3762  */
3763 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3764 {
3765 	int pos;
3766 	u32 cap;
3767 
3768 	pos = pci_rebar_find_pos(pdev, bar);
3769 	if (pos < 0)
3770 		return 0;
3771 
3772 	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3773 	cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
3774 
3775 	/* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3776 	if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3777 	    bar == 0 && cap == 0x700)
3778 		return 0x3f00;
3779 
3780 	return cap;
3781 }
3782 EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
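
/*
 * Example (editorial sketch): each set bit in the returned mask stands
 * for a supported size of 1MB << bit, so a caller can enumerate the
 * supported sizes in bytes like this:
 */
static void example_print_rebar_sizes(struct pci_dev *pdev, int bar)
{
	unsigned long sizes = pci_rebar_get_possible_sizes(pdev, bar);
	unsigned int bit;

	for_each_set_bit(bit, &sizes, 20)
		pci_info(pdev, "BAR %d supports %llu bytes\n",
			 bar, 1ULL << (bit + 20));
}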
3783 
3784 /**
3785  * pci_rebar_get_current_size - get the current size of a BAR
3786  * @pdev: PCI device
3787  * @bar: BAR to query the size of
3788  *
3789  * Read the size of a BAR from the resizable BAR config.
3790  * Returns size if found or negative error code.
3791  */
3792 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3793 {
3794 	int pos;
3795 	u32 ctrl;
3796 
3797 	pos = pci_rebar_find_pos(pdev, bar);
3798 	if (pos < 0)
3799 		return pos;
3800 
3801 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3802 	return FIELD_GET(PCI_REBAR_CTRL_BAR_SIZE, ctrl);
3803 }
3804 
3805 /**
3806  * pci_rebar_set_size - set a new size for a BAR
3807  * @pdev: PCI device
3808  * @bar: BAR to set size to
3809  * @size: new size as defined in the spec (0=1MB, 19=512GB)
3810  *
3811  * Set the new size of a BAR as defined in the spec.
3812  * Returns zero if resizing was successful, error code otherwise.
3813  */
3814 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3815 {
3816 	int pos;
3817 	u32 ctrl;
3818 
3819 	pos = pci_rebar_find_pos(pdev, bar);
3820 	if (pos < 0)
3821 		return pos;
3822 
3823 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3824 	ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3825 	ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
3826 	pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3827 	return 0;
3828 }
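
/*
 * Example (editorial sketch): a simplified resize flow picks the
 * largest supported size, releases the current assignment, and asks
 * for a new one.  A real driver must also cope with bridge windows
 * that are too small for the enlarged BAR; that handling is omitted.
 */
static int example_grow_bar(struct pci_dev *pdev, int bar)
{
	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);

	if (!sizes)
		return -ENOTSUPP;	/* BAR is not resizable */

	pci_release_resource(pdev, bar);
	return pci_resize_resource(pdev, bar, fls(sizes) - 1);
}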
3829 
3830 /**
3831  * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3832  * @dev: the PCI device
3833  * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3834  *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3835  *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3836  *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3837  *
3838  * Return 0 if all upstream bridges support AtomicOp routing, egress
3839  * blocking is disabled on all upstream ports, and the root port supports
3840  * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3841  * AtomicOp completion), or negative otherwise.
3842  */
3843 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3844 {
3845 	struct pci_bus *bus = dev->bus;
3846 	struct pci_dev *bridge;
3847 	u32 cap, ctl2;
3848 
3849 	/*
3850 	 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
3851 	 * in Device Control 2 is reserved in VFs and the PF value applies
3852 	 * to all associated VFs.
3853 	 */
3854 	if (dev->is_virtfn)
3855 		return -EINVAL;
3856 
3857 	if (!pci_is_pcie(dev))
3858 		return -EINVAL;
3859 
3860 	/*
3861 	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3862 	 * AtomicOp requesters.  For now, we only support endpoints as
3863 	 * requesters and root ports as completers.  No endpoints as
3864 	 * completers, and no peer-to-peer.
3865 	 */
3866 
3867 	switch (pci_pcie_type(dev)) {
3868 	case PCI_EXP_TYPE_ENDPOINT:
3869 	case PCI_EXP_TYPE_LEG_END:
3870 	case PCI_EXP_TYPE_RC_END:
3871 		break;
3872 	default:
3873 		return -EINVAL;
3874 	}
3875 
3876 	while (bus->parent) {
3877 		bridge = bus->self;
3878 
3879 		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3880 
3881 		switch (pci_pcie_type(bridge)) {
3882 		/* Ensure switch ports support AtomicOp routing */
3883 		case PCI_EXP_TYPE_UPSTREAM:
3884 		case PCI_EXP_TYPE_DOWNSTREAM:
3885 			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3886 				return -EINVAL;
3887 			break;
3888 
3889 		/* Ensure root port supports all the sizes we care about */
3890 		case PCI_EXP_TYPE_ROOT_PORT:
3891 			if ((cap & cap_mask) != cap_mask)
3892 				return -EINVAL;
3893 			break;
3894 		}
3895 
3896 		/* Ensure upstream ports don't block AtomicOps on egress */
3897 		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3898 			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3899 						   &ctl2);
3900 			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3901 				return -EINVAL;
3902 		}
3903 
3904 		bus = bus->parent;
3905 	}
3906 
3907 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3908 				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3909 	return 0;
3910 }
3911 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
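
/*
 * Example (editorial sketch): a driver that wants to issue 64-bit
 * AtomicOps to host memory would typically try to enable them once at
 * probe time and quietly fall back to ordinary writes on failure:
 */
static void example_enable_atomics(struct pci_dev *pdev)
{
	if (pci_enable_atomic_ops_to_root(pdev,
					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
		pci_info(pdev, "PCIe AtomicOps unavailable, using fallback\n");
}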
3912 
3913 /**
3914  * pci_release_region - Release a PCI BAR
3915  * @pdev: PCI device whose resources were previously reserved by
3916  *	  pci_request_region()
3917  * @bar: BAR to release
3918  *
3919  * Releases the PCI I/O and memory resources previously reserved by a
3920  * successful call to pci_request_region().  Call this function only
3921  * after all use of the PCI regions has ceased.
3922  */
3923 void pci_release_region(struct pci_dev *pdev, int bar)
3924 {
3925 	if (!pci_bar_index_is_valid(bar))
3926 		return;
3927 
3928 	/*
3929 	 * This is done for backwards compatibility, because the old PCI devres
3930 	 * API had a mode in which the function became managed if it had been
3931 	 * enabled with pcim_enable_device() instead of pci_enable_device().
3932 	 */
3933 	if (pci_is_managed(pdev)) {
3934 		pcim_release_region(pdev, bar);
3935 		return;
3936 	}
3937 
3938 	if (pci_resource_len(pdev, bar) == 0)
3939 		return;
3940 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3941 		release_region(pci_resource_start(pdev, bar),
3942 				pci_resource_len(pdev, bar));
3943 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3944 		release_mem_region(pci_resource_start(pdev, bar),
3945 				pci_resource_len(pdev, bar));
3946 }
3947 EXPORT_SYMBOL(pci_release_region);
3948 
3949 /**
3950  * __pci_request_region - Reserve PCI I/O and memory resource
3951  * @pdev: PCI device whose resources are to be reserved
3952  * @bar: BAR to be reserved
3953  * @res_name: Name to be associated with resource.
3954  * @exclusive: whether the region access is exclusive or not
3955  *
3956  * Returns: 0 on success, or %-EBUSY on error (a warning is also printed).
3957  *
3958  * Mark the PCI region associated with PCI device @pdev BAR @bar as
3959  * being reserved by owner @res_name.  Do not access any
3960  * address inside the PCI regions unless this call returns
3961  * successfully.
3962  *
3963  * If @exclusive is set, then the region is marked so that userspace
3964  * is explicitly not allowed to map the resource via /dev/mem or
3965  * sysfs MMIO access.
3969  */
3970 static int __pci_request_region(struct pci_dev *pdev, int bar,
3971 				const char *res_name, int exclusive)
3972 {
3973 	if (!pci_bar_index_is_valid(bar))
3974 		return -EINVAL;
3975 
3976 	if (pci_is_managed(pdev)) {
3977 		if (exclusive == IORESOURCE_EXCLUSIVE)
3978 			return pcim_request_region_exclusive(pdev, bar, res_name);
3979 
3980 		return pcim_request_region(pdev, bar, res_name);
3981 	}
3982 
3983 	if (pci_resource_len(pdev, bar) == 0)
3984 		return 0;
3985 
3986 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3987 		if (!request_region(pci_resource_start(pdev, bar),
3988 			    pci_resource_len(pdev, bar), res_name))
3989 			goto err_out;
3990 	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3991 		if (!__request_mem_region(pci_resource_start(pdev, bar),
3992 					pci_resource_len(pdev, bar), res_name,
3993 					exclusive))
3994 			goto err_out;
3995 	}
3996 
3997 	return 0;
3998 
3999 err_out:
4000 	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
4001 		 &pdev->resource[bar]);
4002 	return -EBUSY;
4003 }
4004 
4005 /**
4006  * pci_request_region - Reserve PCI I/O and memory resource
4007  * @pdev: PCI device whose resources are to be reserved
4008  * @bar: BAR to be reserved
4009  * @res_name: Name to be associated with resource
4010  *
4011  * Returns: 0 on success, or %-EBUSY on error (a warning is also printed).
4012  *
4013  * Mark the PCI region associated with PCI device @pdev BAR @bar as
4014  * being reserved by owner @res_name.  Do not access any
4015  * address inside the PCI regions unless this call returns
4016  * successfully.
4020  *
4021  * NOTE:
4022  * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4023  * when pcim_enable_device() has been called in advance. This hybrid feature is
4024  * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4025  */
4026 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
4027 {
4028 	return __pci_request_region(pdev, bar, res_name, 0);
4029 }
4030 EXPORT_SYMBOL(pci_request_region);
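
/*
 * Example (editorial sketch): the common probe-time pattern pairs the
 * region request with a mapping and unwinds in reverse order if the
 * mapping fails.  pci_iomap() is assumed available via the usual
 * driver includes.
 */
static void __iomem *example_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;

	if (pci_request_region(pdev, 0, "example-driver"))
		return NULL;

	regs = pci_iomap(pdev, 0, 0);	/* length 0 maps the whole BAR */
	if (!regs)
		pci_release_region(pdev, 0);

	return regs;
}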
4031 
4032 /**
4033  * pci_release_selected_regions - Release selected PCI I/O and memory resources
4034  * @pdev: PCI device whose resources were previously reserved
4035  * @bars: Bitmask of BARs to be released
4036  *
4037  * Release selected PCI I/O and memory resources previously reserved.
4038  * Call this function only after all use of the PCI regions has ceased.
4039  */
4040 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
4041 {
4042 	int i;
4043 
4044 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
4045 		if (bars & (1 << i))
4046 			pci_release_region(pdev, i);
4047 }
4048 EXPORT_SYMBOL(pci_release_selected_regions);
4049 
4050 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
4051 					  const char *res_name, int excl)
4052 {
4053 	int i;
4054 
4055 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
4056 		if (bars & (1 << i))
4057 			if (__pci_request_region(pdev, i, res_name, excl))
4058 				goto err_out;
4059 	return 0;
4060 
4061 err_out:
4062 	while (--i >= 0)
4063 		if (bars & (1 << i))
4064 			pci_release_region(pdev, i);
4065 
4066 	return -EBUSY;
4067 }
4068 
4069 
4070 /**
4071  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
4072  * @pdev: PCI device whose resources are to be reserved
4073  * @bars: Bitmask of BARs to be requested
4074  * @res_name: Name to be associated with resource
4075  *
4076  * Returns: 0 on success, negative error code on failure.
4077  *
4078  * NOTE:
4079  * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4080  * when pcim_enable_device() has been called in advance. This hybrid feature is
4081  * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4082  */
4083 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
4084 				 const char *res_name)
4085 {
4086 	return __pci_request_selected_regions(pdev, bars, res_name, 0);
4087 }
4088 EXPORT_SYMBOL(pci_request_selected_regions);
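
/*
 * Example (editorial sketch): rather than hand-building the @bars
 * mask, a driver that only touches MMIO BARs can derive the mask from
 * the resource flags with pci_select_bars():
 */
static int example_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "example-driver");
}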
4089 
4090 /**
4091  * pci_request_selected_regions_exclusive - Request regions exclusively
4092  * @pdev: PCI device to request regions from
4093  * @bars: bit mask of BARs to request
4094  * @res_name: name to be associated with the requests
4095  *
4096  * Returns: 0 on success, negative error code on failure.
4097  *
4098  * NOTE:
4099  * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4100  * when pcim_enable_device() has been called in advance. This hybrid feature is
4101  * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4102  */
4103 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
4104 					   const char *res_name)
4105 {
4106 	return __pci_request_selected_regions(pdev, bars, res_name,
4107 			IORESOURCE_EXCLUSIVE);
4108 }
4109 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4110 
4111 /**
4112  * pci_release_regions - Release reserved PCI I/O and memory resources
4113  * @pdev: PCI device whose resources were previously reserved by
4114  *	  pci_request_regions()
4115  *
4116  * Releases all PCI I/O and memory resources previously reserved by a
4117  * successful call to pci_request_regions().  Call this function only
4118  * after all use of the PCI regions has ceased.
4119  */
4120 void pci_release_regions(struct pci_dev *pdev)
4121 {
4122 	pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4123 }
4124 EXPORT_SYMBOL(pci_release_regions);
4125 
4126 /**
4127  * pci_request_regions - Reserve PCI I/O and memory resources
4128  * @pdev: PCI device whose resources are to be reserved
4129  * @res_name: Name to be associated with resource.
4130  *
4131  * Mark all PCI regions associated with PCI device @pdev as
4132  * being reserved by owner @res_name.  Do not access any
4133  * address inside the PCI regions unless this call returns
4134  * successfully.
4135  *
4136  * Returns 0 on success, or %EBUSY on error.  A warning
4137  * message is also printed on failure.
4138  *
4139  * NOTE:
4140  * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4141  * when pcim_enable_device() has been called in advance. This hybrid feature is
4142  * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4143  */
4144 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
4145 {
4146 	return pci_request_selected_regions(pdev,
4147 			((1 << PCI_STD_NUM_BARS) - 1), res_name);
4148 }
4149 EXPORT_SYMBOL(pci_request_regions);
4150 
4151 /**
4152  * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4153  * @pdev: PCI device whose resources are to be reserved
4154  * @res_name: Name to be associated with resource.
4155  *
4156  * Returns: 0 on success, or %-EBUSY on error (a warning is also printed).
4157  *
4158  * Mark all PCI regions associated with PCI device @pdev as being reserved
4159  * by owner @res_name.  Do not access any address inside the PCI regions
4160  * unless this call returns successfully.
4161  *
4162  * pci_request_regions_exclusive() will mark the region so that /dev/mem
4163  * and the sysfs MMIO access will not be allowed.
4167  *
4168  * NOTE:
4169  * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4170  * when pcim_enable_device() has been called in advance. This hybrid feature is
4171  * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4172  */
4173 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4174 {
4175 	return pci_request_selected_regions_exclusive(pdev,
4176 				((1 << PCI_STD_NUM_BARS) - 1), res_name);
4177 }
4178 EXPORT_SYMBOL(pci_request_regions_exclusive);
4179 
4180 /*
4181  * Record the PCI IO range (expressed as CPU physical address + size).
4182  * Return a negative value if an error has occurred, zero otherwise
4183  */
4184 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4185 			resource_size_t	size)
4186 {
4187 	int ret = 0;
4188 #ifdef PCI_IOBASE
4189 	struct logic_pio_hwaddr *range;
4190 
4191 	if (!size || addr + size < addr)
4192 		return -EINVAL;
4193 
4194 	range = kzalloc(sizeof(*range), GFP_ATOMIC);
4195 	if (!range)
4196 		return -ENOMEM;
4197 
4198 	range->fwnode = fwnode;
4199 	range->size = size;
4200 	range->hw_start = addr;
4201 	range->flags = LOGIC_PIO_CPU_MMIO;
4202 
4203 	ret = logic_pio_register_range(range);
4204 	if (ret)
4205 		kfree(range);
4206 
4207 	/* Ignore duplicates due to deferred probing */
4208 	if (ret == -EEXIST)
4209 		ret = 0;
4210 #endif
4211 
4212 	return ret;
4213 }
4214 
4215 phys_addr_t pci_pio_to_address(unsigned long pio)
4216 {
4217 #ifdef PCI_IOBASE
4218 	if (pio < MMIO_UPPER_LIMIT)
4219 		return logic_pio_to_hwaddr(pio);
4220 #endif
4221 
4222 	return (phys_addr_t) OF_BAD_ADDR;
4223 }
4224 EXPORT_SYMBOL_GPL(pci_pio_to_address);
4225 
4226 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4227 {
4228 #ifdef PCI_IOBASE
4229 	return logic_pio_trans_cpuaddr(address);
4230 #else
4231 	if (address > IO_SPACE_LIMIT)
4232 		return (unsigned long)-1;
4233 
4234 	return (unsigned long) address;
4235 #endif
4236 }
4237 
4238 /**
4239  * pci_remap_iospace - Remap the memory mapped I/O space
4240  * @res: Resource describing the I/O space
4241  * @phys_addr: physical address of range to be mapped
4242  *
4243  * Remap the memory mapped I/O space described by the @res and the CPU
4244  * physical address @phys_addr into virtual address space.  Only
4245  * architectures that have memory mapped IO functions defined (and the
4246  * PCI_IOBASE value defined) should call this function.
4247  */
4248 #ifndef pci_remap_iospace
4249 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4250 {
4251 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4252 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4253 
4254 	if (!(res->flags & IORESOURCE_IO))
4255 		return -EINVAL;
4256 
4257 	if (res->end > IO_SPACE_LIMIT)
4258 		return -EINVAL;
4259 
4260 	return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4261 			       pgprot_device(PAGE_KERNEL));
4262 #else
4263 	/*
4264 	 * This architecture does not have memory mapped I/O space,
4265 	 * so this function should never be called
4266 	 */
4267 	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4268 	return -ENODEV;
4269 #endif
4270 }
4271 EXPORT_SYMBOL(pci_remap_iospace);
4272 #endif
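
/*
 * Example (editorial sketch): a host bridge driver with an I/O window
 * at CPU physical address @phys would typically register the range and
 * then map it; both steps are shown with error handling trimmed.
 * dev_fwnode() (from <linux/property.h>) is assumed available.
 */
static int example_map_io_window(struct device *dev,
				 struct resource *io_res, phys_addr_t phys)
{
	int ret;

	ret = pci_register_io_range(dev_fwnode(dev), phys,
				    resource_size(io_res));
	if (ret)
		return ret;

	return pci_remap_iospace(io_res, phys);
}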
4273 
4274 /**
4275  * pci_unmap_iospace - Unmap the memory mapped I/O space
4276  * @res: resource to be unmapped
4277  *
4278  * Unmap the CPU virtual address @res from virtual address space.  Only
4279  * architectures that have memory mapped IO functions defined (and the
4280  * PCI_IOBASE value defined) should call this function.
4281  */
4282 void pci_unmap_iospace(struct resource *res)
4283 {
4284 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4285 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4286 
4287 	vunmap_range(vaddr, vaddr + resource_size(res));
4288 #endif
4289 }
4290 EXPORT_SYMBOL(pci_unmap_iospace);
4291 
4292 static void __pci_set_master(struct pci_dev *dev, bool enable)
4293 {
4294 	u16 old_cmd, cmd;
4295 
4296 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4297 	if (enable)
4298 		cmd = old_cmd | PCI_COMMAND_MASTER;
4299 	else
4300 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
4301 	if (cmd != old_cmd) {
4302 		pci_dbg(dev, "%s bus mastering\n",
4303 			enable ? "enabling" : "disabling");
4304 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4305 	}
4306 	dev->is_busmaster = enable;
4307 }
4308 
4309 /**
4310  * pcibios_setup - process "pci=" kernel boot arguments
4311  * @str: string used to pass in "pci=" kernel boot arguments
4312  *
4313  * Process kernel boot arguments.  This is the default implementation.
4314  * Architecture specific implementations can override this as necessary.
4315  */
4316 char * __weak __init pcibios_setup(char *str)
4317 {
4318 	return str;
4319 }
4320 
4321 /**
4322  * pcibios_set_master - enable PCI bus-mastering for device dev
4323  * @dev: the PCI device to enable
4324  *
4325  * Enables PCI bus-mastering for the device.  This is the default
4326  * implementation.  Architecture specific implementations can override
4327  * this if necessary.
4328  */
4329 void __weak pcibios_set_master(struct pci_dev *dev)
4330 {
4331 	u8 lat;
4332 
4333 	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4334 	if (pci_is_pcie(dev))
4335 		return;
4336 
4337 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4338 	if (lat < 16)
4339 		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4340 	else if (lat > pcibios_max_latency)
4341 		lat = pcibios_max_latency;
4342 	else
4343 		return;
4344 
4345 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4346 }
4347 
4348 /**
4349  * pci_set_master - enables bus-mastering for device dev
4350  * @dev: the PCI device to enable
4351  *
4352  * Enables bus-mastering on the device and calls pcibios_set_master()
4353  * to do the needed arch specific settings.
4354  */
4355 void pci_set_master(struct pci_dev *dev)
4356 {
4357 	__pci_set_master(dev, true);
4358 	pcibios_set_master(dev);
4359 }
4360 EXPORT_SYMBOL(pci_set_master);
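
/*
 * Example (editorial sketch): bus mastering is normally turned on at
 * probe time, after the device itself has been enabled, since DMA
 * cannot proceed without it:
 */
static int example_probe_enable(struct pci_dev *pdev)
{
	int ret = pci_enable_device(pdev);

	if (ret)
		return ret;

	pci_set_master(pdev);	/* required before the device may DMA */
	return 0;
}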
4361 
4362 /**
4363  * pci_clear_master - disables bus-mastering for device dev
4364  * @dev: the PCI device to disable
4365  */
4366 void pci_clear_master(struct pci_dev *dev)
4367 {
4368 	__pci_set_master(dev, false);
4369 }
4370 EXPORT_SYMBOL(pci_clear_master);
4371 
4372 /**
4373  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4374  * @dev: the PCI device for which MWI is to be enabled
4375  *
4376  * Helper function for pci_set_mwi.
4377  * Originally copied from drivers/net/acenic.c.
4378  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4379  *
4380  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4381  */
4382 int pci_set_cacheline_size(struct pci_dev *dev)
4383 {
4384 	u8 cacheline_size;
4385 
4386 	if (!pci_cache_line_size)
4387 		return -EINVAL;
4388 
4389 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4390 	   equal to or a multiple of the right value. */
4391 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4392 	if (cacheline_size >= pci_cache_line_size &&
4393 	    (cacheline_size % pci_cache_line_size) == 0)
4394 		return 0;
4395 
4396 	/* Write the correct value. */
4397 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4398 	/* Read it back. */
4399 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4400 	if (cacheline_size == pci_cache_line_size)
4401 		return 0;
4402 
4403 	pci_dbg(dev, "cache line size of %d is not supported\n",
4404 		   pci_cache_line_size << 2);
4405 
4406 	return -EINVAL;
4407 }
4408 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4409 
4410 /**
4411  * pci_set_mwi - enables memory-write-invalidate PCI transaction
4412  * @dev: the PCI device for which MWI is enabled
4413  *
4414  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4415  *
4416  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4417  */
4418 int pci_set_mwi(struct pci_dev *dev)
4419 {
4420 #ifdef PCI_DISABLE_MWI
4421 	return 0;
4422 #else
4423 	int rc;
4424 	u16 cmd;
4425 
4426 	rc = pci_set_cacheline_size(dev);
4427 	if (rc)
4428 		return rc;
4429 
4430 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4431 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4432 		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4433 		cmd |= PCI_COMMAND_INVALIDATE;
4434 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4435 	}
4436 	return 0;
4437 #endif
4438 }
4439 EXPORT_SYMBOL(pci_set_mwi);
4440 
4441 /**
4442  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4443  * @dev: the PCI device for which MWI is enabled
4444  *
4445  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4446  * Callers are not required to check the return value.
4447  *
4448  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4449  */
4450 int pci_try_set_mwi(struct pci_dev *dev)
4451 {
4452 #ifdef PCI_DISABLE_MWI
4453 	return 0;
4454 #else
4455 	return pci_set_mwi(dev);
4456 #endif
4457 }
4458 EXPORT_SYMBOL(pci_try_set_mwi);
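
/*
 * Example (editorial sketch): since MWI is only a performance hint on
 * conventional PCI, drivers typically use the _try_ variant and ignore
 * the result:
 */
static void example_enable_mwi(struct pci_dev *pdev)
{
	pci_try_set_mwi(pdev);	/* best effort; failure is harmless */
}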
4459 
4460 /**
4461  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4462  * @dev: the PCI device to disable
4463  *
4464  * Disables PCI Memory-Write-Invalidate transaction on the device
4465  */
4466 void pci_clear_mwi(struct pci_dev *dev)
4467 {
4468 #ifndef PCI_DISABLE_MWI
4469 	u16 cmd;
4470 
4471 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4472 	if (cmd & PCI_COMMAND_INVALIDATE) {
4473 		cmd &= ~PCI_COMMAND_INVALIDATE;
4474 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4475 	}
4476 #endif
4477 }
4478 EXPORT_SYMBOL(pci_clear_mwi);
4479 
4480 /**
4481  * pci_disable_parity - disable parity checking for device
4482  * @dev: the PCI device to operate on
4483  *
4484  * Disable parity checking for device @dev
4485  */
4486 void pci_disable_parity(struct pci_dev *dev)
4487 {
4488 	u16 cmd;
4489 
4490 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4491 	if (cmd & PCI_COMMAND_PARITY) {
4492 		cmd &= ~PCI_COMMAND_PARITY;
4493 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4494 	}
4495 }
4496 
4497 /**
4498  * pci_intx - enables/disables PCI INTx for device dev
4499  * @pdev: the PCI device to operate on
4500  * @enable: boolean: whether to enable or disable PCI INTx
4501  *
4502  * Enables/disables PCI INTx for device @pdev
4503  */
4504 void pci_intx(struct pci_dev *pdev, int enable)
4505 {
4506 	u16 pci_command, new;
4507 
4508 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4509 
4510 	if (enable)
4511 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4512 	else
4513 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
4514 
4515 	if (new == pci_command)
4516 		return;
4517 
4518 	pci_write_config_word(pdev, PCI_COMMAND, new);
4519 }
4520 EXPORT_SYMBOL_GPL(pci_intx);
4521 
4522 /**
4523  * pci_wait_for_pending_transaction - wait for pending transaction
4524  * @dev: the PCI device to operate on
4525  *
4526  * Return 0 if a transaction is still pending after the timeout, 1 otherwise.
4527  */
4528 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4529 {
4530 	if (!pci_is_pcie(dev))
4531 		return 1;
4532 
4533 	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4534 				    PCI_EXP_DEVSTA_TRPND);
4535 }
4536 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4537 
4538 /**
4539  * pcie_flr - initiate a PCIe function level reset
4540  * @dev: device to reset
4541  *
4542  * Initiate a function level reset unconditionally on @dev without
4543  * checking any flags or the FLR capability bit in DEVCAP.
4544  */
4545 int pcie_flr(struct pci_dev *dev)
4546 {
4547 	if (!pci_wait_for_pending_transaction(dev))
4548 		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4549 
4550 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4551 
4552 	if (dev->imm_ready)
4553 		return 0;
4554 
4555 	/*
4556 	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4557 	 * 100ms, but may silently discard requests while the FLR is in
4558 	 * progress.  Wait 100ms before trying to access the device.
4559 	 */
4560 	msleep(100);
4561 
4562 	return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4563 }
4564 EXPORT_SYMBOL_GPL(pcie_flr);
4565 
4566 /**
4567  * pcie_reset_flr - initiate a PCIe function level reset
4568  * @dev: device to reset
4569  * @probe: if true, return 0 if device can be reset this way
4570  *
4571  * Initiate a function level reset on @dev.
4572  */
4573 int pcie_reset_flr(struct pci_dev *dev, bool probe)
4574 {
4575 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4576 		return -ENOTTY;
4577 
4578 	if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4579 		return -ENOTTY;
4580 
4581 	if (probe)
4582 		return 0;
4583 
4584 	return pcie_flr(dev);
4585 }
4586 EXPORT_SYMBOL_GPL(pcie_reset_flr);
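
/*
 * Example (editorial sketch): callers commonly probe first so they can
 * fall back to another reset method when FLR is not available:
 */
static int example_try_flr(struct pci_dev *pdev)
{
	if (pcie_reset_flr(pdev, true))		/* probe only */
		return -ENOTTY;			/* caller tries another method */

	return pcie_reset_flr(pdev, false);	/* perform the reset */
}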
4587 
4588 static int pci_af_flr(struct pci_dev *dev, bool probe)
4589 {
4590 	int pos;
4591 	u8 cap;
4592 
4593 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4594 	if (!pos)
4595 		return -ENOTTY;
4596 
4597 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4598 		return -ENOTTY;
4599 
4600 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4601 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4602 		return -ENOTTY;
4603 
4604 	if (probe)
4605 		return 0;
4606 
4607 	/*
4608 	 * Wait for Transaction Pending bit to clear.  A word-aligned test
4609 	 * is used, so we use the control offset rather than status and shift
4610 	 * the test bit to match.
4611 	 */
4612 	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4613 				 PCI_AF_STATUS_TP << 8))
4614 		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4615 
4616 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4617 
4618 	if (dev->imm_ready)
4619 		return 0;
4620 
4621 	/*
4622 	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4623 	 * updated 27 July 2006; a device must complete an FLR within
4624 	 * 100ms, but may silently discard requests while the FLR is in
4625 	 * progress.  Wait 100ms before trying to access the device.
4626 	 */
4627 	msleep(100);
4628 
4629 	return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4630 }
4631 
4632 /**
4633  * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4634  * @dev: Device to reset.
4635  * @probe: if true, return 0 if the device can be reset this way.
4636  *
4637  * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4638  * unset, it will be reinitialized internally when going from PCI_D3hot to
4639  * PCI_D0.  If that's the case and the device is not in a low-power state
4640  * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4641  *
4642  * NOTE: This causes the caller to sleep for twice the device power transition
4643  * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4644  * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4645  * Moreover, only devices in D0 can be reset by this function.
4646  */
4647 static int pci_pm_reset(struct pci_dev *dev, bool probe)
4648 {
4649 	u16 csr;
4650 
4651 	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4652 		return -ENOTTY;
4653 
4654 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4655 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4656 		return -ENOTTY;
4657 
4658 	if (probe)
4659 		return 0;
4660 
4661 	if (dev->current_state != PCI_D0)
4662 		return -EINVAL;
4663 
4664 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4665 	csr |= PCI_D3hot;
4666 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4667 	pci_dev_d3_sleep(dev);
4668 
4669 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4670 	csr |= PCI_D0;
4671 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4672 	pci_dev_d3_sleep(dev);
4673 
4674 	return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4675 }
4676 
4677 /**
4678  * pcie_wait_for_link_status - Wait for link status change
4679  * @pdev: Device whose link to wait for.
4680  * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE.
4681  * @active: Waiting for active or inactive?
4682  *
4683  * Return 0 if successful, or -ETIMEDOUT if status has not changed within
4684  * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4685  */
4686 static int pcie_wait_for_link_status(struct pci_dev *pdev,
4687 				     bool use_lt, bool active)
4688 {
4689 	u16 lnksta_mask, lnksta_match;
4690 	unsigned long end_jiffies;
4691 	u16 lnksta;
4692 
4693 	lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA;
4694 	lnksta_match = active ? lnksta_mask : 0;
4695 
4696 	end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS);
4697 	do {
4698 		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
4699 		if ((lnksta & lnksta_mask) == lnksta_match)
4700 			return 0;
4701 		msleep(1);
4702 	} while (time_before(jiffies, end_jiffies));
4703 
4704 	return -ETIMEDOUT;
4705 }
4706 
4707 /**
4708  * pcie_retrain_link - Request a link retrain and wait for it to complete
4709  * @pdev: Device whose link to retrain.
4710  * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
4711  *
4712  * Retrain completion status is retrieved from the Link Status Register
4713  * according to @use_lt.  It is not verified whether the use of the DLLLA
4714  * bit is valid.
4715  *
4716  * Return 0 if successful, or -ETIMEDOUT if training has not completed
4717  * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4718  */
4719 int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
4720 {
4721 	int rc;
4722 
4723 	/*
4724 	 * Ensure the updated LNKCTL parameters are used during link
4725 	 * training by checking that there is no ongoing link training that
4726 	 * may have started before link parameters were changed, so as to
4727 	 * avoid LTSSM race as recommended in Implementation Note at the end
4728 	 * of PCIe r6.1 sec 7.5.3.7.
4729 	 */
4730 	rc = pcie_wait_for_link_status(pdev, true, false);
4731 	if (rc)
4732 		return rc;
4733 
4734 	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4735 	if (pdev->clear_retrain_link) {
4736 		/*
4737 		 * Due to an erratum in some devices the Retrain Link bit
4738 		 * needs to be cleared again manually to allow the link
4739 		 * training to succeed.
4740 		 */
4741 		pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4742 	}
4743 
4744 	rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
4745 
4746 	/*
4747 	 * Clear LBMS after a manual retrain so that the bit can be used
4748 	 * to track link speed or width changes made by hardware itself
4749 	 * in attempt to correct unreliable link operation.
4750 	 */
4751 	pcie_capability_write_word(pdev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
4752 	return rc;
4753 }
4754 
4755 /**
4756  * pcie_wait_for_link_delay - Wait until link is active or inactive
4757  * @pdev: Bridge device
4758  * @active: waiting for active or inactive?
4759  * @delay: Delay to wait after link has become active (in ms)
4760  *
4761  * Use this to wait until the link becomes active or inactive.
4762  */
4763 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4764 				     int delay)
4765 {
4766 	int rc;
4767 
4768 	/*
4769 	 * Some controllers might not implement link active reporting. In this
4770 	 * case, we wait for 1000 ms + any delay requested by the caller.
4771 	 */
4772 	if (!pdev->link_active_reporting) {
4773 		msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay);
4774 		return true;
4775 	}
4776 
4777 	/*
4778 	 * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
4779 	 * after which we should expect the link to be active if the reset was
4780 	 * successful. If so, software must wait a minimum of 100 ms before
4781 	 * sending configuration requests to devices downstream of this port.
4782 	 *
4783 	 * If the link fails to activate, either the device was physically
4784 	 * removed or the link is permanently failed.
4785 	 */
4786 	if (active)
4787 		msleep(20);
4788 	rc = pcie_wait_for_link_status(pdev, false, active);
4789 	if (active) {
4790 		if (rc)
4791 			rc = pcie_failed_link_retrain(pdev);
4792 		if (rc)
4793 			return false;
4794 
4795 		msleep(delay);
4796 		return true;
4797 	}
4798 
4799 	if (rc)
4800 		return false;
4801 
4802 	return true;
4803 }
4804 
4805 /**
4806  * pcie_wait_for_link - Wait until link is active or inactive
4807  * @pdev: Bridge device
4808  * @active: waiting for active or inactive?
4809  *
4810  * Use this to wait until the link becomes active or inactive.
4811  */
4812 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4813 {
4814 	return pcie_wait_for_link_delay(pdev, active, 100);
4815 }
4816 
4817 /*
4818  * Find maximum D3cold delay required by all the devices on the bus.  The
4819  * spec says 100 ms, but firmware can lower it and we allow drivers to
4820  * increase it as well.
4821  *
4822  * Called with @pci_bus_sem locked for reading.
4823  */
4824 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4825 {
4826 	const struct pci_dev *pdev;
4827 	int min_delay = 100;
4828 	int max_delay = 0;
4829 
4830 	list_for_each_entry(pdev, &bus->devices, bus_list) {
4831 		if (pdev->d3cold_delay < min_delay)
4832 			min_delay = pdev->d3cold_delay;
4833 		if (pdev->d3cold_delay > max_delay)
4834 			max_delay = pdev->d3cold_delay;
4835 	}
4836 
4837 	return max(min_delay, max_delay);
4838 }
4839 
4840 /**
4841  * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4842  * @dev: PCI bridge
4843  * @reset_type: reset type in human-readable form
4844  *
4845  * Handle necessary delays before access to the devices on the secondary
4846  * side of the bridge are permitted after D3cold to D0 transition
4847  * or Conventional Reset.
4848  *
4849  * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4850  * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4851  * 4.3.2.
4852  *
4853  * Return 0 on success or -ENOTTY if the first device on the secondary bus
4854  * failed to become accessible.
4855  */
4856 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
4857 {
4858 	struct pci_dev *child __free(pci_dev_put) = NULL;
4859 	int delay;
4860 
4861 	if (pci_dev_is_disconnected(dev))
4862 		return 0;
4863 
4864 	if (!pci_is_bridge(dev))
4865 		return 0;
4866 
4867 	down_read(&pci_bus_sem);
4868 
4869 	/*
4870 	 * We only deal with devices that are present currently on the bus.
4871 	 * For any hot-added devices the access delay is handled in pciehp
4872 	 * board_added(). In case of ACPI hotplug the firmware is expected
4873 	 * to configure the devices before the OS is notified.
4874 	 */
4875 	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4876 		up_read(&pci_bus_sem);
4877 		return 0;
4878 	}
4879 
4880 	/* Take d3cold_delay requirements into account */
4881 	delay = pci_bus_max_d3cold_delay(dev->subordinate);
4882 	if (!delay) {
4883 		up_read(&pci_bus_sem);
4884 		return 0;
4885 	}
4886 
4887 	child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
4888 					     struct pci_dev, bus_list));
4889 	up_read(&pci_bus_sem);
4890 
4891 	/*
4892 	 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
4893 	 * accessing the device after reset (that is, 1000 ms + 100 ms).
4894 	 */
4895 	if (!pci_is_pcie(dev)) {
4896 		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4897 		msleep(1000 + delay);
4898 		return 0;
4899 	}
4900 
4901 	/*
4902 	 * PCIe downstream and root ports that do not support speeds greater
4903 	 * than 5 GT/s need to wait a minimum of 100 ms.  For higher speeds
4904 	 * (gen3) we must first wait for the data link layer to become
4905 	 * active.
4906 	 *
4907 	 * However, 100 ms is the minimum and the PCIe spec says the
4908 	 * software must allow at least 1s before it can determine that the
4909 	 * device that did not respond is broken. Also, a device can
4910 	 * take longer than that to respond if it indicates so through Request
4911 	 * Retry Status completions.
4912 	 *
4913 	 * Therefore we wait for 100 ms and check for the device presence
4914 	 * until the timeout expires.
4915 	 */
4916 	if (!pcie_downstream_port(dev))
4917 		return 0;
4918 
4919 	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4920 		u16 status;
4921 
4922 		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4923 		msleep(delay);
4924 
4925 		if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay))
4926 			return 0;
4927 
4928 		/*
4929 		 * If the port supports active link reporting we now check
4930 		 * whether the link is active and if not bail out early with
4931 		 * the assumption that the device is not present anymore.
4932 		 */
4933 		if (!dev->link_active_reporting)
4934 			return -ENOTTY;
4935 
4936 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status);
4937 		if (!(status & PCI_EXP_LNKSTA_DLLLA))
4938 			return -ENOTTY;
4939 
4940 		return pci_dev_wait(child, reset_type,
4941 				    PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT);
4942 	}
4943 
4944 	pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4945 		delay);
4946 	if (!pcie_wait_for_link_delay(dev, true, delay)) {
4947 		/* Did not train, no need to wait any further */
4948 		pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay);
4949 		return -ENOTTY;
4950 	}
4951 
4952 	return pci_dev_wait(child, reset_type,
4953 			    PCIE_RESET_READY_POLL_MS - delay);
4954 }
4955 
4956 void pci_reset_secondary_bus(struct pci_dev *dev)
4957 {
4958 	u16 ctrl;
4959 
4960 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4961 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4962 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4963 
4964 	/*
4965 	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
4966 	 * this to 2ms to ensure that we meet the minimum requirement.
4967 	 */
4968 	msleep(2);
4969 
4970 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4971 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4972 }
4973 
4974 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4975 {
4976 	pci_reset_secondary_bus(dev);
4977 }
4978 
4979 /**
4980  * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4981  * @dev: Bridge device
4982  *
4983  * Use the bridge control register to assert reset on the secondary bus.
4984  * Devices on the secondary bus are left in power-on state.
4985  */
4986 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4987 {
4988 	if (!dev->block_cfg_access)
4989 		pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n",
4990 			      __builtin_return_address(0));
4991 	pcibios_reset_secondary_bus(dev);
4992 
4993 	return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
4994 }
4995 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4996 
4997 static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
4998 {
4999 	struct pci_dev *pdev;
5000 
5001 	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
5002 	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5003 		return -ENOTTY;
5004 
5005 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
5006 		if (pdev != dev)
5007 			return -ENOTTY;
5008 
5009 	if (probe)
5010 		return 0;
5011 
5012 	return pci_bridge_secondary_bus_reset(dev->bus->self);
5013 }
5014 
5015 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
5016 {
5017 	int rc = -ENOTTY;
5018 
5019 	if (!hotplug || !try_module_get(hotplug->owner))
5020 		return rc;
5021 
5022 	if (hotplug->ops->reset_slot)
5023 		rc = hotplug->ops->reset_slot(hotplug, probe);
5024 
5025 	module_put(hotplug->owner);
5026 
5027 	return rc;
5028 }
5029 
5030 static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
5031 {
5032 	if (dev->multifunction || dev->subordinate || !dev->slot ||
5033 	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5034 		return -ENOTTY;
5035 
5036 	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5037 }
5038 
5039 static u16 cxl_port_dvsec(struct pci_dev *dev)
5040 {
5041 	return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL,
5042 					 PCI_DVSEC_CXL_PORT);
5043 }
5044 
5045 static bool cxl_sbr_masked(struct pci_dev *dev)
5046 {
5047 	u16 dvsec, reg;
5048 	int rc;
5049 
5050 	dvsec = cxl_port_dvsec(dev);
5051 	if (!dvsec)
5052 		return false;
5053 
5054 	rc = pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
5055 	if (rc || PCI_POSSIBLE_ERROR(reg))
5056 		return false;
5057 
5058 	/*
5059 	 * Per CXL spec r3.1, sec 8.1.5.2, when "Unmask SBR" is 0, the SBR
5060 	 * bit in Bridge Control has no effect.  When 1, the Port generates
5061 	 * hot reset when the SBR bit is set to 1.
5062 	 */
5063 	if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR)
5064 		return false;
5065 
5066 	return true;
5067 }
5068 
5069 static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5070 {
5071 	struct pci_dev *bridge = pci_upstream_bridge(dev);
5072 	int rc;
5073 
5074 	/*
5075 	 * If "dev" is below a CXL port that has SBR control masked, SBR
5076 	 * won't do anything, so return error.
5077 	 */
5078 	if (bridge && cxl_sbr_masked(bridge)) {
5079 		if (probe)
5080 			return 0;
5081 
5082 		return -ENOTTY;
5083 	}
5084 
5085 	rc = pci_dev_reset_slot_function(dev, probe);
5086 	if (rc != -ENOTTY)
5087 		return rc;
5088 	return pci_parent_bus_reset(dev, probe);
5089 }
5090 
5091 static int cxl_reset_bus_function(struct pci_dev *dev, bool probe)
5092 {
5093 	struct pci_dev *bridge;
5094 	u16 dvsec, reg, val;
5095 	int rc;
5096 
5097 	bridge = pci_upstream_bridge(dev);
5098 	if (!bridge)
5099 		return -ENOTTY;
5100 
5101 	dvsec = cxl_port_dvsec(bridge);
5102 	if (!dvsec)
5103 		return -ENOTTY;
5104 
5105 	if (probe)
5106 		return 0;
5107 
5108 	rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
5109 	if (rc)
5110 		return -ENOTTY;
5111 
5112 	if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) {
5113 		val = reg;
5114 	} else {
5115 		val = reg | PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR;
5116 		pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
5117 				      val);
5118 	}
5119 
5120 	rc = pci_reset_bus_function(dev, probe);
5121 
5122 	if (reg != val)
5123 		pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
5124 				      reg);
5125 
5126 	return rc;
5127 }
5128 
5129 void pci_dev_lock(struct pci_dev *dev)
5130 {
5131 	/* block PM suspend, driver probe, etc. */
5132 	device_lock(&dev->dev);
5133 	pci_cfg_access_lock(dev);
5134 }
5135 EXPORT_SYMBOL_GPL(pci_dev_lock);
5136 
5137 /* Return 1 on successful lock, 0 on contention */
5138 int pci_dev_trylock(struct pci_dev *dev)
5139 {
5140 	if (device_trylock(&dev->dev)) {
5141 		if (pci_cfg_access_trylock(dev))
5142 			return 1;
5143 		device_unlock(&dev->dev);
5144 	}
5145 
5146 	return 0;
5147 }
5148 EXPORT_SYMBOL_GPL(pci_dev_trylock);
5149 
5150 void pci_dev_unlock(struct pci_dev *dev)
5151 {
5152 	pci_cfg_access_unlock(dev);
5153 	device_unlock(&dev->dev);
5154 }
5155 EXPORT_SYMBOL_GPL(pci_dev_unlock);
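
/*
 * Example (illustrative sketch, not part of this file): a caller that
 * must quiesce a device takes the device lock around the critical
 * section; "pdev" is a hypothetical struct pci_dev pointer.
 *
 *	pci_dev_lock(pdev);
 *	... reset the device or rewrite config space ...
 *	pci_dev_unlock(pdev);
 *
 * Paths that must not block can use the non-blocking variant:
 *
 *	if (!pci_dev_trylock(pdev))
 *		return -EAGAIN;
 */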
5156 
5157 static void pci_dev_save_and_disable(struct pci_dev *dev)
5158 {
5159 	const struct pci_error_handlers *err_handler =
5160 			dev->driver ? dev->driver->err_handler : NULL;
5161 
5162 	/*
5163 	 * dev->driver->err_handler->reset_prepare() is protected against
5164 	 * races with ->remove() by the device lock, which must be held by
5165 	 * the caller.
5166 	 */
5167 	if (err_handler && err_handler->reset_prepare)
5168 		err_handler->reset_prepare(dev);
5169 
5170 	/*
5171 	 * Wake-up device prior to save.  PM registers default to D0 after
5172 	 * reset and a simple register restore doesn't reliably return
5173 	 * to a non-D0 state anyway.
5174 	 */
5175 	pci_set_power_state(dev, PCI_D0);
5176 
5177 	pci_save_state(dev);
5178 	/*
5179 	 * Disable the device by clearing the Command register, except for
5180 	 * INTx-disable which is set.  This not only disables MMIO and I/O port
5181 	 * BARs, but also prevents the device from being Bus Master, preventing
5182 	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
5183 	 * compliant devices, INTx-disable prevents legacy interrupts.
5184 	 */
5185 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5186 }
5187 
5188 static void pci_dev_restore(struct pci_dev *dev)
5189 {
5190 	const struct pci_error_handlers *err_handler =
5191 			dev->driver ? dev->driver->err_handler : NULL;
5192 
5193 	pci_restore_state(dev);
5194 
5195 	/*
5196 	 * dev->driver->err_handler->reset_done() is protected against
5197 	 * races with ->remove() by the device lock, which must be held by
5198 	 * the caller.
5199 	 */
5200 	if (err_handler && err_handler->reset_done)
5201 		err_handler->reset_done(dev);
5202 }
5203 
5204 /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5205 static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5206 	{ },
5207 	{ pci_dev_specific_reset, .name = "device_specific" },
5208 	{ pci_dev_acpi_reset, .name = "acpi" },
5209 	{ pcie_reset_flr, .name = "flr" },
5210 	{ pci_af_flr, .name = "af_flr" },
5211 	{ pci_pm_reset, .name = "pm" },
5212 	{ pci_reset_bus_function, .name = "bus" },
5213 	{ cxl_reset_bus_function, .name = "cxl_bus" },
5214 };
5215 
5216 static ssize_t reset_method_show(struct device *dev,
5217 				 struct device_attribute *attr, char *buf)
5218 {
5219 	struct pci_dev *pdev = to_pci_dev(dev);
5220 	ssize_t len = 0;
5221 	int i, m;
5222 
5223 	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5224 		m = pdev->reset_methods[i];
5225 		if (!m)
5226 			break;
5227 
5228 		len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
5229 				     pci_reset_fn_methods[m].name);
5230 	}
5231 
5232 	if (len)
5233 		len += sysfs_emit_at(buf, len, "\n");
5234 
5235 	return len;
5236 }
5237 
5238 static int reset_method_lookup(const char *name)
5239 {
5240 	int m;
5241 
5242 	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5243 		if (sysfs_streq(name, pci_reset_fn_methods[m].name))
5244 			return m;
5245 	}
5246 
5247 	return 0;	/* not found */
5248 }
5249 
5250 static ssize_t reset_method_store(struct device *dev,
5251 				  struct device_attribute *attr,
5252 				  const char *buf, size_t count)
5253 {
5254 	struct pci_dev *pdev = to_pci_dev(dev);
5255 	char *options, *tmp_options, *name;
5256 	int m, n;
5257 	u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
5258 
5259 	if (sysfs_streq(buf, "")) {
5260 		pdev->reset_methods[0] = 0;
5261 		pci_warn(pdev, "All device reset methods disabled by user");
5262 		return count;
5263 	}
5264 
5265 	if (sysfs_streq(buf, "default")) {
5266 		pci_init_reset_methods(pdev);
5267 		return count;
5268 	}
5269 
5270 	options = kstrndup(buf, count, GFP_KERNEL);
5271 	if (!options)
5272 		return -ENOMEM;
5273 
5274 	n = 0;
5275 	tmp_options = options;
5276 	while ((name = strsep(&tmp_options, " ")) != NULL) {
5277 		if (sysfs_streq(name, ""))
5278 			continue;
5279 
5280 		name = strim(name);
5281 
5282 		m = reset_method_lookup(name);
5283 		if (!m) {
5284 			pci_err(pdev, "Invalid reset method '%s'", name);
5285 			goto error;
5286 		}
5287 
5288 		if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
5289 			pci_err(pdev, "Unsupported reset method '%s'", name);
5290 			goto error;
5291 		}
5292 
5293 		if (n == PCI_NUM_RESET_METHODS - 1) {
5294 			pci_err(pdev, "Too many reset methods\n");
5295 			goto error;
5296 		}
5297 
5298 		reset_methods[n++] = m;
5299 	}
5300 
5301 	reset_methods[n] = 0;
5302 
5303 	/* Warn if dev-specific supported but not highest priority */
5304 	if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
5305 	    reset_methods[0] != 1)
5306 		pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
5307 	memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
5308 	kfree(options);
5309 	return count;
5310 
5311 error:
5312 	/* Leave previous methods unchanged */
5313 	kfree(options);
5314 	return -EINVAL;
5315 }
5316 static DEVICE_ATTR_RW(reset_method);
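
/*
 * Example sysfs interaction (a hedged sketch; the device address is
 * made up, and the methods shown depend on what the device supports):
 *
 *	# cat /sys/bus/pci/devices/0000:01:00.0/reset_method
 *	flr bus
 *	# echo "bus flr" > /sys/bus/pci/devices/0000:01:00.0/reset_method
 *	# echo default > /sys/bus/pci/devices/0000:01:00.0/reset_method
 *
 * Writing an empty string disables all reset methods for the device;
 * "default" re-probes and restores the default priority order.
 */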
5317 
5318 static struct attribute *pci_dev_reset_method_attrs[] = {
5319 	&dev_attr_reset_method.attr,
5320 	NULL,
5321 };
5322 
5323 static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
5324 						    struct attribute *a, int n)
5325 {
5326 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
5327 
5328 	if (!pci_reset_supported(pdev))
5329 		return 0;
5330 
5331 	return a->mode;
5332 }
5333 
5334 const struct attribute_group pci_dev_reset_method_attr_group = {
5335 	.attrs = pci_dev_reset_method_attrs,
5336 	.is_visible = pci_dev_reset_method_attr_is_visible,
5337 };
5338 
5339 /**
5340  * __pci_reset_function_locked - reset a PCI device function while holding
5341  * the @dev mutex lock.
5342  * @dev: PCI device to reset
5343  *
5344  * Some devices allow an individual function to be reset without affecting
5345  * other functions in the same device.  The PCI device must be responsive
5346  * to PCI config space in order to use this function.
5347  *
5348  * The device function is presumed to be unused and the caller is holding
5349  * the device mutex lock when this function is called.
5350  *
5351  * Resetting the device will make the contents of PCI configuration space
5352  * random, so any caller of this must be prepared to reinitialise the
5353  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5354  * etc.
5355  *
5356  * Returns 0 if the device function was successfully reset or negative if the
5357  * device doesn't support resetting a single function.
5358  */
5359 int __pci_reset_function_locked(struct pci_dev *dev)
5360 {
5361 	int i, m, rc;
5362 
5363 	might_sleep();
5364 
5365 	/*
5366 	 * A reset method returns -ENOTTY if it doesn't support this device and
5367 	 * we should try the next method.
5368 	 *
5369 	 * If it returns 0 (success), we're finished.  If it returns any other
5370 	 * error, we're also finished: this indicates that further reset
5371 	 * mechanisms might be broken on the device.
5372 	 */
5373 	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5374 		m = dev->reset_methods[i];
5375 		if (!m)
5376 			return -ENOTTY;
5377 
5378 		rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
5379 		if (!rc)
5380 			return 0;
5381 		if (rc != -ENOTTY)
5382 			return rc;
5383 	}
5384 
5385 	return -ENOTTY;
5386 }
5387 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5388 
5389 /**
5390  * pci_init_reset_methods - check whether device can be safely reset
5391  * and store supported reset mechanisms.
5392  * @dev: PCI device to check for reset mechanisms
5393  *
5394  * Some devices allow an individual function to be reset without affecting
5395  * other functions in the same device.  The PCI device must be in D0-D3hot
5396  * state.
5397  *
5398  * Stores reset mechanisms supported by device in reset_methods byte array
5399  * which is a member of struct pci_dev.
5400  */
5401 void pci_init_reset_methods(struct pci_dev *dev)
5402 {
5403 	int m, i, rc;
5404 
5405 	BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5406 
5407 	might_sleep();
5408 
5409 	i = 0;
5410 	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5411 		rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5412 		if (!rc)
5413 			dev->reset_methods[i++] = m;
5414 		else if (rc != -ENOTTY)
5415 			break;
5416 	}
5417 
5418 	dev->reset_methods[i] = 0;
5419 }
5420 
5421 /**
5422  * pci_reset_function - quiesce and reset a PCI device function
5423  * @dev: PCI device to reset
5424  *
5425  * Some devices allow an individual function to be reset without affecting
5426  * other functions in the same device.  The PCI device must be responsive
5427  * to PCI config space in order to use this function.
5428  *
5429  * This function does not just reset the PCI portion of a device, but
5430  * clears all the state associated with the device.  This function differs
5431  * from __pci_reset_function_locked() in that it saves and restores device state
5432  * over the reset and takes the PCI device lock.
5433  *
5434  * Returns 0 if the device function was successfully reset or negative if the
5435  * device doesn't support resetting a single function.
5436  */
5437 int pci_reset_function(struct pci_dev *dev)
5438 {
5439 	struct pci_dev *bridge;
5440 	int rc;
5441 
5442 	if (!pci_reset_supported(dev))
5443 		return -ENOTTY;
5444 
5445 	/*
5446 	 * If there's no upstream bridge, no locking is needed since there is
5447 	 * no upstream bridge configuration to hold consistent.
5448 	 */
5449 	bridge = pci_upstream_bridge(dev);
5450 	if (bridge)
5451 		pci_dev_lock(bridge);
5452 
5453 	pci_dev_lock(dev);
5454 	pci_dev_save_and_disable(dev);
5455 
5456 	rc = __pci_reset_function_locked(dev);
5457 
5458 	pci_dev_restore(dev);
5459 	pci_dev_unlock(dev);
5460 
5461 	if (bridge)
5462 		pci_dev_unlock(bridge);
5463 
5464 	return rc;
5465 }
5466 EXPORT_SYMBOL_GPL(pci_reset_function);
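
/*
 * Example (hypothetical driver code, for illustration only): recovering
 * a wedged device from process context.
 *
 *	if (pci_reset_supported(pdev)) {
 *		rc = pci_reset_function(pdev);
 *		if (rc)
 *			pci_err(pdev, "function reset failed: %d\n", rc);
 *	}
 *
 * Config space is saved and restored around the reset, but any
 * device-specific reinitialisation remains the caller's job.
 */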
5467 
5468 /**
5469  * pci_reset_function_locked - quiesce and reset a PCI device function
5470  * @dev: PCI device to reset
5471  *
5472  * Some devices allow an individual function to be reset without affecting
5473  * other functions in the same device.  The PCI device must be responsive
5474  * to PCI config space in order to use this function.
5475  *
5476  * This function does not just reset the PCI portion of a device, but
5477  * clears all the state associated with the device.  This function differs
5478  * from __pci_reset_function_locked() in that it saves and restores device state
5479  * over the reset.  It also differs from pci_reset_function() in that it
5480  * requires the PCI device lock to be held.
5481  *
5482  * Returns 0 if the device function was successfully reset or negative if the
5483  * device doesn't support resetting a single function.
5484  */
5485 int pci_reset_function_locked(struct pci_dev *dev)
5486 {
5487 	int rc;
5488 
5489 	if (!pci_reset_supported(dev))
5490 		return -ENOTTY;
5491 
5492 	pci_dev_save_and_disable(dev);
5493 
5494 	rc = __pci_reset_function_locked(dev);
5495 
5496 	pci_dev_restore(dev);
5497 
5498 	return rc;
5499 }
5500 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5501 
5502 /**
5503  * pci_try_reset_function - quiesce and reset a PCI device function
5504  * @dev: PCI device to reset
5505  *
5506  * Same as above, except return -EAGAIN if unable to lock device.
5507  */
5508 int pci_try_reset_function(struct pci_dev *dev)
5509 {
5510 	int rc;
5511 
5512 	if (!pci_reset_supported(dev))
5513 		return -ENOTTY;
5514 
5515 	if (!pci_dev_trylock(dev))
5516 		return -EAGAIN;
5517 
5518 	pci_dev_save_and_disable(dev);
5519 	rc = __pci_reset_function_locked(dev);
5520 	pci_dev_restore(dev);
5521 	pci_dev_unlock(dev);
5522 
5523 	return rc;
5524 }
5525 EXPORT_SYMBOL_GPL(pci_try_reset_function);
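
/*
 * Example (illustrative): a caller that holds its own locks and must
 * not block on the device lock handles contention explicitly:
 *
 *	rc = pci_try_reset_function(pdev);
 *	if (rc == -EAGAIN)
 *		return rc;
 *
 * On -EAGAIN the caller is expected to back off and retry later; the
 * surrounding retry policy is hypothetical.
 */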
5526 
5527 /* Do any devices on or below this bus prevent a bus reset? */
5528 static bool pci_bus_resettable(struct pci_bus *bus)
5529 {
5530 	struct pci_dev *dev;
5531 
5533 	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5534 		return false;
5535 
5536 	list_for_each_entry(dev, &bus->devices, bus_list) {
5537 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5538 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5539 			return false;
5540 	}
5541 
5542 	return true;
5543 }
5544 
5545 /* Lock devices from the top of the tree down */
5546 static void pci_bus_lock(struct pci_bus *bus)
5547 {
5548 	struct pci_dev *dev;
5549 
5550 	pci_dev_lock(bus->self);
5551 	list_for_each_entry(dev, &bus->devices, bus_list) {
5552 		if (dev->subordinate)
5553 			pci_bus_lock(dev->subordinate);
5554 		else
5555 			pci_dev_lock(dev);
5556 	}
5557 }
5558 
5559 /* Unlock devices from the bottom of the tree up */
5560 static void pci_bus_unlock(struct pci_bus *bus)
5561 {
5562 	struct pci_dev *dev;
5563 
5564 	list_for_each_entry(dev, &bus->devices, bus_list) {
5565 		if (dev->subordinate)
5566 			pci_bus_unlock(dev->subordinate);
5567 		else
5568 			pci_dev_unlock(dev);
5569 	}
5570 	pci_dev_unlock(bus->self);
5571 }
5572 
5573 /* Return 1 on successful lock, 0 on contention */
5574 static int pci_bus_trylock(struct pci_bus *bus)
5575 {
5576 	struct pci_dev *dev;
5577 
5578 	if (!pci_dev_trylock(bus->self))
5579 		return 0;
5580 
5581 	list_for_each_entry(dev, &bus->devices, bus_list) {
5582 		if (dev->subordinate) {
5583 			if (!pci_bus_trylock(dev->subordinate))
5584 				goto unlock;
5585 		} else if (!pci_dev_trylock(dev))
5586 			goto unlock;
5587 	}
5588 	return 1;
5589 
5590 unlock:
5591 	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5592 		if (dev->subordinate)
5593 			pci_bus_unlock(dev->subordinate);
5594 		else
5595 			pci_dev_unlock(dev);
5596 	}
5597 	pci_dev_unlock(bus->self);
5598 	return 0;
5599 }
5600 
5601 /* Do any devices on or below this slot prevent a bus reset? */
5602 static bool pci_slot_resettable(struct pci_slot *slot)
5603 {
5604 	struct pci_dev *dev;
5605 
5606 	if (slot->bus->self &&
5607 	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5608 		return false;
5609 
5610 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5611 		if (!dev->slot || dev->slot != slot)
5612 			continue;
5613 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5614 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5615 			return false;
5616 	}
5617 
5618 	return true;
5619 }
5620 
5621 /* Lock devices from the top of the tree down */
5622 static void pci_slot_lock(struct pci_slot *slot)
5623 {
5624 	struct pci_dev *dev;
5625 
5626 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5627 		if (!dev->slot || dev->slot != slot)
5628 			continue;
5629 		if (dev->subordinate)
5630 			pci_bus_lock(dev->subordinate);
5631 		else
5632 			pci_dev_lock(dev);
5633 	}
5634 }
5635 
5636 /* Unlock devices from the bottom of the tree up */
5637 static void pci_slot_unlock(struct pci_slot *slot)
5638 {
5639 	struct pci_dev *dev;
5640 
5641 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5642 		if (!dev->slot || dev->slot != slot)
5643 			continue;
5644 		if (dev->subordinate)
5645 			pci_bus_unlock(dev->subordinate);
5646 		else
5647 			pci_dev_unlock(dev);
5648 	}
5649 }
5650 
5651 /* Return 1 on successful lock, 0 on contention */
5652 static int pci_slot_trylock(struct pci_slot *slot)
5653 {
5654 	struct pci_dev *dev;
5655 
5656 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5657 		if (!dev->slot || dev->slot != slot)
5658 			continue;
5659 		if (dev->subordinate) {
5660 			if (!pci_bus_trylock(dev->subordinate)) {
5661 				pci_dev_unlock(dev);
5662 				goto unlock;
5663 			}
5664 		} else if (!pci_dev_trylock(dev))
5665 			goto unlock;
5666 	}
5667 	return 1;
5668 
5669 unlock:
5670 	list_for_each_entry_continue_reverse(dev,
5671 					     &slot->bus->devices, bus_list) {
5672 		if (!dev->slot || dev->slot != slot)
5673 			continue;
5674 		if (dev->subordinate)
5675 			pci_bus_unlock(dev->subordinate);
5676 		else
5677 			pci_dev_unlock(dev);
5678 	}
5679 	return 0;
5680 }
5681 
5682 /*
5683  * Save and disable devices from the top of the tree down while holding
5684  * the @dev mutex lock for the entire tree.
5685  */
5686 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5687 {
5688 	struct pci_dev *dev;
5689 
5690 	list_for_each_entry(dev, &bus->devices, bus_list) {
5691 		pci_dev_save_and_disable(dev);
5692 		if (dev->subordinate)
5693 			pci_bus_save_and_disable_locked(dev->subordinate);
5694 	}
5695 }
5696 
5697 /*
5698  * Restore devices from top of the tree down while holding @dev mutex lock
5699  * for the entire tree.  Parent bridges need to be restored before we can
5700  * get to subordinate devices.
5701  */
5702 static void pci_bus_restore_locked(struct pci_bus *bus)
5703 {
5704 	struct pci_dev *dev;
5705 
5706 	list_for_each_entry(dev, &bus->devices, bus_list) {
5707 		pci_dev_restore(dev);
5708 		if (dev->subordinate) {
5709 			pci_bridge_wait_for_secondary_bus(dev, "bus reset");
5710 			pci_bus_restore_locked(dev->subordinate);
5711 		}
5712 	}
5713 }
5714 
5715 /*
5716  * Save and disable devices from the top of the tree down while holding
5717  * the @dev mutex lock for the entire tree.
5718  */
5719 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5720 {
5721 	struct pci_dev *dev;
5722 
5723 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5724 		if (!dev->slot || dev->slot != slot)
5725 			continue;
5726 		pci_dev_save_and_disable(dev);
5727 		if (dev->subordinate)
5728 			pci_bus_save_and_disable_locked(dev->subordinate);
5729 	}
5730 }
5731 
5732 /*
5733  * Restore devices from top of the tree down while holding @dev mutex lock
5734  * for the entire tree.  Parent bridges need to be restored before we can
5735  * get to subordinate devices.
5736  */
5737 static void pci_slot_restore_locked(struct pci_slot *slot)
5738 {
5739 	struct pci_dev *dev;
5740 
5741 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5742 		if (!dev->slot || dev->slot != slot)
5743 			continue;
5744 		pci_dev_restore(dev);
5745 		if (dev->subordinate) {
5746 			pci_bridge_wait_for_secondary_bus(dev, "slot reset");
5747 			pci_bus_restore_locked(dev->subordinate);
5748 		}
5749 	}
5750 }
5751 
5752 static int pci_slot_reset(struct pci_slot *slot, bool probe)
5753 {
5754 	int rc;
5755 
5756 	if (!slot || !pci_slot_resettable(slot))
5757 		return -ENOTTY;
5758 
5759 	if (!probe)
5760 		pci_slot_lock(slot);
5761 
5762 	might_sleep();
5763 
5764 	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5765 
5766 	if (!probe)
5767 		pci_slot_unlock(slot);
5768 
5769 	return rc;
5770 }
5771 
5772 /**
5773  * pci_probe_reset_slot - probe whether a PCI slot can be reset
5774  * @slot: PCI slot to probe
5775  *
5776  * Return 0 if slot can be reset, negative if a slot reset is not supported.
5777  */
5778 int pci_probe_reset_slot(struct pci_slot *slot)
5779 {
5780 	return pci_slot_reset(slot, PCI_RESET_PROBE);
5781 }
5782 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5783 
5784 /**
5785  * __pci_reset_slot - Try to reset a PCI slot
5786  * @slot: PCI slot to reset
5787  *
5788  * A PCI bus may host multiple slots, and each slot may support a reset
5789  * mechanism independent of other slots.  For instance, some slots may support
5790  * slot power control.  In the case of a 1:1 bus-to-slot architecture, this
5791  * function may wrap the bus reset to avoid spurious slot-related events such
5792  * as hotplug.  Generally a slot reset should be attempted before a bus reset.
5793  * All the functions of the slot and any subordinate buses behind the slot are
5794  * reset through this function.  PCI config space of all devices in the slot
5795  * and behind the slot is saved before and restored after reset.
5796  *
5797  * Same as above, except return -EAGAIN if the slot cannot be locked.
5798  */
5799 static int __pci_reset_slot(struct pci_slot *slot)
5800 {
5801 	int rc;
5802 
5803 	rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5804 	if (rc)
5805 		return rc;
5806 
5807 	if (pci_slot_trylock(slot)) {
5808 		pci_slot_save_and_disable_locked(slot);
5809 		might_sleep();
5810 		rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5811 		pci_slot_restore_locked(slot);
5812 		pci_slot_unlock(slot);
5813 	} else
5814 		rc = -EAGAIN;
5815 
5816 	return rc;
5817 }
5818 
5819 static int pci_bus_reset(struct pci_bus *bus, bool probe)
5820 {
5821 	int ret;
5822 
5823 	if (!bus->self || !pci_bus_resettable(bus))
5824 		return -ENOTTY;
5825 
5826 	if (probe)
5827 		return 0;
5828 
5829 	pci_bus_lock(bus);
5830 
5831 	might_sleep();
5832 
5833 	ret = pci_bridge_secondary_bus_reset(bus->self);
5834 
5835 	pci_bus_unlock(bus);
5836 
5837 	return ret;
5838 }
5839 
5840 /**
5841  * pci_bus_error_reset - reset the bridge's subordinate bus
5842  * @bridge: The parent device that connects to the bus to reset
5843  *
5844  * This function will first try to reset the slots on this bus if the method is
5845  * available. If slot reset fails or is not available, this will fall back to a
5846  * secondary bus reset.
5847  */
5848 int pci_bus_error_reset(struct pci_dev *bridge)
5849 {
5850 	struct pci_bus *bus = bridge->subordinate;
5851 	struct pci_slot *slot;
5852 
5853 	if (!bus)
5854 		return -ENOTTY;
5855 
5856 	mutex_lock(&pci_slot_mutex);
5857 	if (list_empty(&bus->slots))
5858 		goto bus_reset;
5859 
5860 	list_for_each_entry(slot, &bus->slots, list)
5861 		if (pci_probe_reset_slot(slot))
5862 			goto bus_reset;
5863 
5864 	list_for_each_entry(slot, &bus->slots, list)
5865 		if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
5866 			goto bus_reset;
5867 
5868 	mutex_unlock(&pci_slot_mutex);
5869 	return 0;
5870 bus_reset:
5871 	mutex_unlock(&pci_slot_mutex);
5872 	return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
5873 }
5874 
5875 /**
5876  * pci_probe_reset_bus - probe whether a PCI bus can be reset
5877  * @bus: PCI bus to probe
5878  *
5879  * Return 0 if bus can be reset, negative if a bus reset is not supported.
5880  */
5881 int pci_probe_reset_bus(struct pci_bus *bus)
5882 {
5883 	return pci_bus_reset(bus, PCI_RESET_PROBE);
5884 }
5885 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5886 
5887 /**
5888  * __pci_reset_bus - Try to reset a PCI bus
5889  * @bus: top level PCI bus to reset
5890  *
5891  * Same as above, except return -EAGAIN if the bus cannot be locked.
5892  */
5893 int __pci_reset_bus(struct pci_bus *bus)
5894 {
5895 	int rc;
5896 
5897 	rc = pci_bus_reset(bus, PCI_RESET_PROBE);
5898 	if (rc)
5899 		return rc;
5900 
5901 	if (pci_bus_trylock(bus)) {
5902 		pci_bus_save_and_disable_locked(bus);
5903 		might_sleep();
5904 		rc = pci_bridge_secondary_bus_reset(bus->self);
5905 		pci_bus_restore_locked(bus);
5906 		pci_bus_unlock(bus);
5907 	} else
5908 		rc = -EAGAIN;
5909 
5910 	return rc;
5911 }
5912 
5913 /**
5914  * pci_reset_bus - Try to reset a PCI bus
5915  * @pdev: top level PCI device to reset via slot/bus
5916  *
5917  * Same as above, except return -EAGAIN if the bus cannot be locked.
5918  */
5919 int pci_reset_bus(struct pci_dev *pdev)
5920 {
5921 	return (!pci_probe_reset_slot(pdev->slot)) ?
5922 	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5923 }
5924 EXPORT_SYMBOL_GPL(pci_reset_bus);
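
/*
 * Example (illustrative): pci_reset_bus() prefers a slot reset when
 * @pdev sits in a resettable slot and falls back to a secondary bus
 * reset otherwise.  A hypothetical caller:
 *
 *	rc = pci_reset_bus(pdev);
 *	if (rc == -EAGAIN)
 *		... the device tree could not be locked; retry later ...
 */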
5925 
5926 /**
5927  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5928  * @dev: PCI device to query
5929  *
5930  * Returns mmrbc: maximum designed memory read count in bytes or
5931  * appropriate error value.
5932  */
5933 int pcix_get_max_mmrbc(struct pci_dev *dev)
5934 {
5935 	int cap;
5936 	u32 stat;
5937 
5938 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5939 	if (!cap)
5940 		return -EINVAL;
5941 
5942 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5943 		return -EINVAL;
5944 
5945 	return 512 << FIELD_GET(PCI_X_STATUS_MAX_READ, stat);
5946 }
5947 EXPORT_SYMBOL(pcix_get_max_mmrbc);
5948 
5949 /**
5950  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5951  * @dev: PCI device to query
5952  *
5953  * Returns mmrbc: maximum memory read count in bytes or appropriate error
5954  * value.
5955  */
5956 int pcix_get_mmrbc(struct pci_dev *dev)
5957 {
5958 	int cap;
5959 	u16 cmd;
5960 
5961 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5962 	if (!cap)
5963 		return -EINVAL;
5964 
5965 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5966 		return -EINVAL;
5967 
5968 	return 512 << FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
5969 }
5970 EXPORT_SYMBOL(pcix_get_mmrbc);
5971 
5972 /**
5973  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5974  * @dev: PCI device to configure
5975  * @mmrbc: maximum memory read count in bytes
5976  *    valid values are 512, 1024, 2048, 4096
5977  *
5978  * If possible, sets the maximum memory read byte count; some bridges have
5979  * errata that prevent this.
5980  */
5981 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5982 {
5983 	int cap;
5984 	u32 stat, v, o;
5985 	u16 cmd;
5986 
5987 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5988 		return -EINVAL;
5989 
5990 	v = ffs(mmrbc) - 10;
5991 
5992 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5993 	if (!cap)
5994 		return -EINVAL;
5995 
5996 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5997 		return -EINVAL;
5998 
5999 	if (v > FIELD_GET(PCI_X_STATUS_MAX_READ, stat))
6000 		return -E2BIG;
6001 
6002 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
6003 		return -EINVAL;
6004 
6005 	o = FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
6006 	if (o != v) {
6007 		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
6008 			return -EIO;
6009 
6010 		cmd &= ~PCI_X_CMD_MAX_READ;
6011 		cmd |= FIELD_PREP(PCI_X_CMD_MAX_READ, v);
6012 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
6013 			return -EIO;
6014 	}
6015 	return 0;
6016 }
6017 EXPORT_SYMBOL(pcix_set_mmrbc);
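
/*
 * Example (hedged sketch): clamping a requested MMRBC to the design
 * maximum before programming it; "want" is a hypothetical value.
 *
 *	int max = pcix_get_max_mmrbc(dev);
 *
 *	if (max > 0)
 *		rc = pcix_set_mmrbc(dev, min(want, max));
 */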
6018 
6019 /**
6020  * pcie_get_readrq - get PCI Express read request size
6021  * @dev: PCI device to query
6022  *
6023  * Returns maximum memory read request in bytes or appropriate error value.
6024  */
6025 int pcie_get_readrq(struct pci_dev *dev)
6026 {
6027 	u16 ctl;
6028 
6029 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6030 
6031 	return 128 << FIELD_GET(PCI_EXP_DEVCTL_READRQ, ctl);
6032 }
6033 EXPORT_SYMBOL(pcie_get_readrq);
6034 
6035 /**
6036  * pcie_set_readrq - set PCI Express maximum memory read request
6037  * @dev: PCI device to configure
6038  * @rq: maximum memory read count in bytes
6039  *    valid values are 128, 256, 512, 1024, 2048, 4096
6040  *
6041  * If possible, sets the maximum memory read request size in bytes.
6042  */
6043 int pcie_set_readrq(struct pci_dev *dev, int rq)
6044 {
6045 	u16 v;
6046 	int ret;
6047 	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
6048 
6049 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
6050 		return -EINVAL;
6051 
6052 	/*
6053 	 * If using the "performance" PCIe config, we clamp the read rq
6054 	 * size to the max packet size to keep the host bridge from
6055 	 * generating requests larger than we can cope with.
6056 	 */
6057 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
6058 		int mps = pcie_get_mps(dev);
6059 
6060 		if (mps < rq)
6061 			rq = mps;
6062 	}
6063 
6064 	v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, ffs(rq) - 8);
6065 
6066 	if (bridge->no_inc_mrrs) {
6067 		int max_mrrs = pcie_get_readrq(dev);
6068 
6069 		if (rq > max_mrrs) {
6070 			pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
6071 			return -EINVAL;
6072 		}
6073 	}
6074 
6075 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6076 						  PCI_EXP_DEVCTL_READRQ, v);
6077 
6078 	return pcibios_err_to_errno(ret);
6079 }
6080 EXPORT_SYMBOL(pcie_set_readrq);
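
/*
 * Example (illustrative): a driver that limits its Max_Read_Request_Size,
 * e.g. to reduce the impact of large reads on other agents; the 512-byte
 * value is arbitrary.
 *
 *	if (pcie_get_readrq(pdev) > 512)
 *		pcie_set_readrq(pdev, 512);
 */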
6081 
6082 /**
6083  * pcie_get_mps - get PCI Express maximum payload size
6084  * @dev: PCI device to query
6085  *
6086  * Returns maximum payload size in bytes
6087  */
6088 int pcie_get_mps(struct pci_dev *dev)
6089 {
6090 	u16 ctl;
6091 
6092 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6093 
6094 	return 128 << FIELD_GET(PCI_EXP_DEVCTL_PAYLOAD, ctl);
6095 }
6096 EXPORT_SYMBOL(pcie_get_mps);
6097 
6098 /**
6099  * pcie_set_mps - set PCI Express maximum payload size
6100  * @dev: PCI device to configure
6101  * @mps: maximum payload size in bytes
6102  *    valid values are 128, 256, 512, 1024, 2048, 4096
6103  *
6104  * If possible, sets the maximum payload size.
6105  */
6106 int pcie_set_mps(struct pci_dev *dev, int mps)
6107 {
6108 	u16 v;
6109 	int ret;
6110 
6111 	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
6112 		return -EINVAL;
6113 
6114 	v = ffs(mps) - 8;
6115 	if (v > dev->pcie_mpss)
6116 		return -EINVAL;
6117 	v = FIELD_PREP(PCI_EXP_DEVCTL_PAYLOAD, v);
6118 
6119 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6120 						  PCI_EXP_DEVCTL_PAYLOAD, v);
6121 
6122 	return pcibios_err_to_errno(ret);
6123 }
6124 EXPORT_SYMBOL(pcie_set_mps);
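
/*
 * The Device Control encoding used above follows from ffs(): for the
 * valid power-of-two sizes, ffs(mps) - 8 yields 128 -> 0, 256 -> 1,
 * 512 -> 2, 1024 -> 3, 2048 -> 4 and 4096 -> 5, i.e. log2(mps) - 7,
 * which is checked against the device's advertised MPS support
 * (dev->pcie_mpss) before being written to PCI_EXP_DEVCTL.
 */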
6125 
6126 static enum pci_bus_speed to_pcie_link_speed(u16 lnksta)
6127 {
6128 	return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)];
6129 }
6130 
6131 int pcie_link_speed_mbps(struct pci_dev *pdev)
6132 {
6133 	u16 lnksta;
6134 	int err;
6135 
6136 	err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
6137 	if (err)
6138 		return err;
6139 
6140 	return pcie_dev_speed_mbps(to_pcie_link_speed(lnksta));
6141 }
6142 EXPORT_SYMBOL(pcie_link_speed_mbps);
6143 
6144 /**
6145  * pcie_bandwidth_available - determine minimum link settings of a PCIe
6146  *			      device and its bandwidth limitation
6147  * @dev: PCI device to query
6148  * @limiting_dev: storage for device causing the bandwidth limitation
6149  * @speed: storage for speed of limiting device
6150  * @width: storage for width of limiting device
6151  *
6152  * Walk up the PCI device chain and find the point where the minimum
6153  * bandwidth is available.  Return the bandwidth available there and (if
6154  * limiting_dev, speed, and width pointers are supplied) information about
6155  * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
6156  * raw bandwidth.
6157  */
6158 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
6159 			     enum pci_bus_speed *speed,
6160 			     enum pcie_link_width *width)
6161 {
6162 	u16 lnksta;
6163 	enum pci_bus_speed next_speed;
6164 	enum pcie_link_width next_width;
6165 	u32 bw, next_bw;
6166 
6167 	if (speed)
6168 		*speed = PCI_SPEED_UNKNOWN;
6169 	if (width)
6170 		*width = PCIE_LNK_WIDTH_UNKNOWN;
6171 
6172 	bw = 0;
6173 
6174 	while (dev) {
6175 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
6176 
6177 		next_speed = to_pcie_link_speed(lnksta);
6178 		next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
6179 
6180 		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
6181 
6182 		/* Check if current device limits the total bandwidth */
6183 		if (!bw || next_bw <= bw) {
6184 			bw = next_bw;
6185 
6186 			if (limiting_dev)
6187 				*limiting_dev = dev;
6188 			if (speed)
6189 				*speed = next_speed;
6190 			if (width)
6191 				*width = next_width;
6192 		}
6193 
6194 		dev = pci_upstream_bridge(dev);
6195 	}
6196 
6197 	return bw;
6198 }
6199 EXPORT_SYMBOL(pcie_bandwidth_available);
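
/*
 * Example (illustrative; all variable names are hypothetical): finding
 * the bottleneck link above a device.
 *
 *	struct pci_dev *limit = NULL;
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *	u32 bw;
 *
 *	bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);
 *
 * On return, bw is the raw bandwidth in Mb/s and limit points at the
 * device whose upstream link constrains it.
 */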
6200 
6201 /**
6202  * pcie_get_speed_cap - query for the PCI device's link speed capability
6203  * @dev: PCI device to query
6204  *
6205  * Query the PCI device speed capability.  Return the maximum link speed
6206  * supported by the device.
6207  */
6208 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
6209 {
6210 	u32 lnkcap2, lnkcap;
6211 
6212 	/*
6213 	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
6214 	 * implementation note there recommends using the Supported Link
6215 	 * Speeds Vector in Link Capabilities 2 when supported.
6216 	 *
6217 	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
6218 	 * should use the Supported Link Speeds field in Link Capabilities,
6219 	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
6220 	 */
6221 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
6222 
6223 	/* PCIe r3.0-compliant */
6224 	if (lnkcap2)
6225 		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
6226 
6227 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6228 	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
6229 		return PCIE_SPEED_5_0GT;
6230 	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
6231 		return PCIE_SPEED_2_5GT;
6232 
6233 	return PCI_SPEED_UNKNOWN;
6234 }
6235 EXPORT_SYMBOL(pcie_get_speed_cap);
6236 
6237 /**
6238  * pcie_get_width_cap - query for the PCI device's link width capability
6239  * @dev: PCI device to query
6240  *
6241  * Query the PCI device width capability.  Return the maximum link width
6242  * supported by the device.
6243  */
6244 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
6245 {
6246 	u32 lnkcap;
6247 
6248 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6249 	if (lnkcap)
6250 		return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
6251 
6252 	return PCIE_LNK_WIDTH_UNKNOWN;
6253 }
6254 EXPORT_SYMBOL(pcie_get_width_cap);
6255 
6256 /**
6257  * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
6258  * @dev: PCI device
6259  * @speed: storage for link speed
6260  * @width: storage for link width
6261  *
6262  * Calculate a PCI device's link bandwidth by querying for its link speed
6263  * and width, multiplying them, and applying encoding overhead.  The result
6264  * is in Mb/s, i.e., megabits/second of raw bandwidth.
6265  */
6266 static u32 pcie_bandwidth_capable(struct pci_dev *dev,
6267 				  enum pci_bus_speed *speed,
6268 				  enum pcie_link_width *width)
6269 {
6270 	*speed = pcie_get_speed_cap(dev);
6271 	*width = pcie_get_width_cap(dev);
6272 
6273 	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
6274 		return 0;
6275 
6276 	return *width * PCIE_SPEED2MBS_ENC(*speed);
6277 }
6278 
6279 /**
6280  * __pcie_print_link_status - Report the PCI device's link speed and width
6281  * @dev: PCI device to query
6282  * @verbose: Print info even when enough bandwidth is available
6283  *
6284  * If the available bandwidth at the device is less than the device is
6285  * capable of, report the device's maximum possible bandwidth and the
6286  * upstream link that limits its performance.  If @verbose, always print
6287  * the available bandwidth, even if the device isn't constrained.
6288  */
6289 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6290 {
6291 	enum pcie_link_width width, width_cap;
6292 	enum pci_bus_speed speed, speed_cap;
6293 	struct pci_dev *limiting_dev = NULL;
6294 	u32 bw_avail, bw_cap;
6295 
6296 	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6297 	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6298 
6299 	if (bw_avail >= bw_cap && verbose)
6300 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
6301 			 bw_cap / 1000, bw_cap % 1000,
6302 			 pci_speed_string(speed_cap), width_cap);
6303 	else if (bw_avail < bw_cap)
6304 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
6305 			 bw_avail / 1000, bw_avail % 1000,
6306 			 pci_speed_string(speed), width,
6307 			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6308 			 bw_cap / 1000, bw_cap % 1000,
6309 			 pci_speed_string(speed_cap), width_cap);
6310 }
6311 
6312 /**
6313  * pcie_print_link_status - Report the PCI device's link speed and width
6314  * @dev: PCI device to query
6315  *
6316  * Report the available bandwidth at the device.
6317  */
6318 void pcie_print_link_status(struct pci_dev *dev)
6319 {
6320 	__pcie_print_link_status(dev, true);
6321 }
6322 EXPORT_SYMBOL(pcie_print_link_status);
6323 
6324 /**
6325  * pci_select_bars - Make BAR mask from the type of resource
6326  * @dev: the PCI device for which BAR mask is made
6327  * @flags: resource type mask to be selected
6328  *
6329  * This helper routine makes a BAR mask from the given resource type.
6330  */
6331 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6332 {
6333 	int i, bars = 0;
6334 	for (i = 0; i < PCI_NUM_RESOURCES; i++)
6335 		if (pci_resource_flags(dev, i) & flags)
6336 			bars |= (1 << i);
6337 	return bars;
6338 }
6339 EXPORT_SYMBOL(pci_select_bars);
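
/*
 * Example (illustrative): requesting only the memory BARs of a device,
 * a common pattern in driver probe paths ("mydrv" is a made-up name):
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	rc = pci_request_selected_regions(pdev, bars, "mydrv");
 */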
6340 
6341 /* Some architectures require additional programming to enable VGA */
6342 static arch_set_vga_state_t arch_set_vga_state;
6343 
6344 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6345 {
6346 	arch_set_vga_state = func;	/* NULL disables */
6347 }
6348 
6349 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6350 				  unsigned int command_bits, u32 flags)
6351 {
6352 	if (arch_set_vga_state)
6353 		return arch_set_vga_state(dev, decode, command_bits,
6354 						flags);
6355 	return 0;
6356 }
6357 
6358 /**
6359  * pci_set_vga_state - set VGA decode state on device and parents if requested
6360  * @dev: the PCI device
6361  * @decode: true = enable decoding, false = disable decoding
6362  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6363  * @flags: traverse ancestors and change bridges
6364  * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
6365  */
6366 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6367 		      unsigned int command_bits, u32 flags)
6368 {
6369 	struct pci_bus *bus;
6370 	struct pci_dev *bridge;
6371 	u16 cmd;
6372 	int rc;
6373 
6374 	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6375 
6376 	/* ARCH specific VGA enables */
6377 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6378 	if (rc)
6379 		return rc;
6380 
6381 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6382 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
6383 		if (decode)
6384 			cmd |= command_bits;
6385 		else
6386 			cmd &= ~command_bits;
6387 		pci_write_config_word(dev, PCI_COMMAND, cmd);
6388 	}
6389 
6390 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6391 		return 0;
6392 
6393 	bus = dev->bus;
6394 	while (bus) {
6395 		bridge = bus->self;
6396 		if (bridge) {
6397 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6398 					     &cmd);
6399 			if (decode)
6400 				cmd |= PCI_BRIDGE_CTL_VGA;
6401 			else
6402 				cmd &= ~PCI_BRIDGE_CTL_VGA;
6403 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6404 					      cmd);
6405 		}
6406 		bus = bus->parent;
6407 	}
6408 	return 0;
6409 }
6410 
6411 #ifdef CONFIG_ACPI
6412 bool pci_pr3_present(struct pci_dev *pdev)
6413 {
6414 	struct acpi_device *adev;
6415 
6416 	if (acpi_disabled)
6417 		return false;
6418 
6419 	adev = ACPI_COMPANION(&pdev->dev);
6420 	if (!adev)
6421 		return false;
6422 
6423 	return adev->power.flags.power_resources &&
6424 		acpi_has_method(adev->handle, "_PR3");
6425 }
6426 EXPORT_SYMBOL_GPL(pci_pr3_present);
6427 #endif
6428 
6429 /**
6430  * pci_add_dma_alias - Add a DMA devfn alias for a device
6431  * @dev: the PCI device for which alias is added
6432  * @devfn_from: alias slot and function
6433  * @nr_devfns: number of subsequent devfns to alias
6434  *
6435  * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6436  * which is used to program permissible bus-devfn source addresses for DMA
6437  * requests in an IOMMU.  These aliases factor into IOMMU group creation
6438  * and are useful for devices generating DMA requests beyond or different
6439  * from their logical bus-devfn.  Examples include device quirks where the
6440  * device simply uses the wrong devfn, as well as non-transparent bridges
6441  * where the alias may be a proxy for devices in another domain.
6442  *
6443  * IOMMU group creation is performed during device discovery or addition,
6444  * prior to any potential DMA mapping and therefore prior to driver probing
6445  * (especially for userspace assigned devices where IOMMU group definition
6446  * cannot be left as a userspace activity).  DMA aliases should therefore
6447  * be configured via quirks, such as the PCI fixup header quirk.
6448  */
6449 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
6450 		       unsigned int nr_devfns)
6451 {
6452 	int devfn_to;
6453 
6454 	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6455 	devfn_to = devfn_from + nr_devfns - 1;
6456 
6457 	if (!dev->dma_alias_mask)
6458 		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6459 	if (!dev->dma_alias_mask) {
6460 		pci_warn(dev, "Unable to allocate DMA alias mask\n");
6461 		return;
6462 	}
6463 
6464 	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6465 
6466 	if (nr_devfns == 1)
6467 		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6468 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6469 	else if (nr_devfns > 1)
6470 		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6471 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6472 				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6473 }
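
/*
 * Example (illustrative sketch of a fixup; the vendor/device IDs are
 * made up): aliasing all functions of a device to function 0, as some
 * multifunction devices issue DMA with the function-0 requester ID.
 *
 *	static void quirk_dma_func0_alias(struct pci_dev *dev)
 *	{
 *		if (PCI_FUNC(dev->devfn))
 *			pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_dma_func0_alias);
 */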
6474 
6475 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6476 {
6477 	return (dev1->dma_alias_mask &&
6478 		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6479 	       (dev2->dma_alias_mask &&
6480 		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6481 	       pci_real_dma_dev(dev1) == dev2 ||
6482 	       pci_real_dma_dev(dev2) == dev1;
6483 }
6484 
6485 bool pci_device_is_present(struct pci_dev *pdev)
6486 {
6487 	u32 v;
6488 
6489 	/* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
6490 	pdev = pci_physfn(pdev);
6491 	if (pci_dev_is_disconnected(pdev))
6492 		return false;
6493 	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6494 }
6495 EXPORT_SYMBOL_GPL(pci_device_is_present);
6496 
6497 void pci_ignore_hotplug(struct pci_dev *dev)
6498 {
6499 	struct pci_dev *bridge = dev->bus->self;
6500 
6501 	dev->ignore_hotplug = 1;
6502 	/* Propagate the "ignore hotplug" setting to the parent bridge. */
6503 	if (bridge)
6504 		bridge->ignore_hotplug = 1;
6505 }
6506 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6507 
6508 /**
6509  * pci_real_dma_dev - Get PCI DMA device for PCI device
6510  * @dev: the PCI device that may have a PCI DMA alias
6511  *
6512  * Permits the platform to provide architecture-specific functionality to
6513  * devices needing to alias DMA to another PCI device on another PCI bus. If
6514  * the PCI device is on the same bus, it is recommended to use
6515  * pci_add_dma_alias(). This is the default implementation. Architecture
6516  * implementations can override this.
6517  */
6518 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6519 {
6520 	return dev;
6521 }
6522 
6523 resource_size_t __weak pcibios_default_alignment(void)
6524 {
6525 	return 0;
6526 }
6527 
6528 /*
6529  * Arches that don't want to expose struct resource to userland as-is in
6530  * sysfs and /proc can implement their own pci_resource_to_user().
6531  */
6532 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6533 				 const struct resource *rsrc,
6534 				 resource_size_t *start, resource_size_t *end)
6535 {
6536 	*start = rsrc->start;
6537 	*end = rsrc->end;
6538 }
6539 
6540 static char *resource_alignment_param;
6541 static DEFINE_SPINLOCK(resource_alignment_lock);
6542 
6543 /**
6544  * pci_specified_resource_alignment - get resource alignment specified by user.
6545  * @dev: the PCI device to get
6546  * @resize: whether or not to change resources' size when reassigning alignment
6547  *
6548  * RETURNS: Resource alignment if it is specified.
6549  *          Zero if it is not specified.
6550  */
6551 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6552 							bool *resize)
6553 {
6554 	int align_order, count;
6555 	resource_size_t align = pcibios_default_alignment();
6556 	const char *p;
6557 	int ret;
6558 
6559 	spin_lock(&resource_alignment_lock);
6560 	p = resource_alignment_param;
6561 	if (!p || !*p)
6562 		goto out;
6563 	if (pci_has_flag(PCI_PROBE_ONLY)) {
6564 		align = 0;
6565 		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6566 		goto out;
6567 	}
6568 
6569 	while (*p) {
6570 		count = 0;
6571 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6572 		    p[count] == '@') {
6573 			p += count + 1;
6574 			if (align_order > 63) {
6575 				pr_err("PCI: Invalid requested alignment (order %d)\n",
6576 				       align_order);
6577 				align_order = PAGE_SHIFT;
6578 			}
6579 		} else {
6580 			align_order = PAGE_SHIFT;
6581 		}
6582 
6583 		ret = pci_dev_str_match(dev, p, &p);
6584 		if (ret == 1) {
6585 			*resize = true;
6586 			align = 1ULL << align_order;
6587 			break;
6588 		} else if (ret < 0) {
6589 			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6590 			       p);
6591 			break;
6592 		}
6593 
6594 		if (*p != ';' && *p != ',') {
6595 			/* End of param or invalid format */
6596 			break;
6597 		}
6598 		p++;
6599 	}
6600 out:
6601 	spin_unlock(&resource_alignment_lock);
6602 	return align;
6603 }
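
/*
 * Example parameter strings (illustrative; the address and IDs are made
 * up).  An explicit "<order>@" prefix requests 2^order alignment;
 * without it, PAGE_SHIFT is assumed, as parsed above:
 *
 *	pci=resource_alignment=20@0000:01:00.0
 *	pci=resource_alignment=pci:8086:1234
 */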
6604 
6605 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6606 					   resource_size_t align, bool resize)
6607 {
6608 	struct resource *r = &dev->resource[bar];
6609 	const char *r_name = pci_resource_name(dev, bar);
6610 	resource_size_t size;
6611 
6612 	if (!(r->flags & IORESOURCE_MEM))
6613 		return;
6614 
6615 	if (r->flags & IORESOURCE_PCI_FIXED) {
6616 		pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
6617 			 r_name, r, (unsigned long long)align);
6618 		return;
6619 	}
6620 
6621 	size = resource_size(r);
6622 	if (size >= align)
6623 		return;
6624 
6625 	/*
6626 	 * Increase the alignment of the resource.  There are two ways we
6627 	 * can do this:
6628 	 *
6629 	 * 1) Increase the size of the resource.  BARs are aligned on their
6630 	 *    size, so when we reallocate space for this resource, we'll
6631 	 *    allocate it with the larger alignment.  This also prevents
6632 	 *    assignment of any other BARs inside the alignment region, so
6633 	 *    if we're requesting page alignment, this means no other BARs
6634 	 *    will share the page.
6635 	 *
6636 	 *    The disadvantage is that this makes the resource larger than
6637 	 *    the hardware BAR, which may break drivers that compute things
6638 	 *    based on the resource size, e.g., to find registers at a
6639 	 *    fixed offset before the end of the BAR.
6640 	 *
6641 	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6642 	 *    set r->start to the desired alignment.  By itself this
6643 	 *    doesn't prevent other BARs being put inside the alignment
6644 	 *    region, but if we realign *every* resource of every device in
6645 	 *    the system, none of them will share an alignment region.
6646 	 *
6647 	 * When the user has requested alignment for only some devices via
6648 	 * the "pci=resource_alignment" argument, "resize" is true and we
6649 	 * use the first method.  Otherwise we assume we're aligning all
6650 	 * devices and we use the second.
6651 	 */
6652 
6653 	pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
6654 		 r_name, r, (unsigned long long)align);
6655 
6656 	if (resize) {
6657 		r->start = 0;
6658 		r->end = align - 1;
6659 	} else {
6660 		r->flags &= ~IORESOURCE_SIZEALIGN;
6661 		r->flags |= IORESOURCE_STARTALIGN;
6662 		r->start = align;
6663 		r->end = r->start + size - 1;
6664 	}
6665 	r->flags |= IORESOURCE_UNSET;
6666 }
6667 
6668 /*
6669  * This function disables memory decoding and releases memory resources
6670  * of devices specified by the kernel's boot parameter 'pci=resource_alignment='.
6671  * It also rounds up the size to the specified alignment.
6672  * Later on, the kernel will assign page-aligned memory resources back
6673  * to the device.
6674  */
6675 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6676 {
6677 	int i;
6678 	struct resource *r;
6679 	resource_size_t align;
6680 	u16 command;
6681 	bool resize = false;
6682 
6683 	/*
6684 	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6685 	 * 3.4.1.11.  Their resources are allocated from the space
6686 	 * described by the VF BARx register in the PF's SR-IOV capability.
6687 	 * We can't influence their alignment here.
6688 	 */
6689 	if (dev->is_virtfn)
6690 		return;
6691 
6692 	/* Check whether the specified device is a reassignment target */
6693 	align = pci_specified_resource_alignment(dev, &resize);
6694 	if (!align)
6695 		return;
6696 
6697 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6698 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6699 		pci_warn(dev, "Can't reassign resources to host bridge\n");
6700 		return;
6701 	}
6702 
6703 	pci_read_config_word(dev, PCI_COMMAND, &command);
6704 	command &= ~PCI_COMMAND_MEMORY;
6705 	pci_write_config_word(dev, PCI_COMMAND, command);
6706 
6707 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6708 		pci_request_resource_alignment(dev, i, align, resize);
6709 
6710 	/*
6711 	 * Need to disable the bridge's resource windows
6712 	 * so that the kernel can reassign new resource
6713 	 * windows later on.
6714 	 */
6715 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6716 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6717 			r = &dev->resource[i];
6718 			if (!(r->flags & IORESOURCE_MEM))
6719 				continue;
6720 			r->flags |= IORESOURCE_UNSET;
6721 			r->end = resource_size(r) - 1;
6722 			r->start = 0;
6723 		}
6724 		pci_disable_bridge_window(dev);
6725 	}
6726 }
6727 
6728 static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf)
6729 {
6730 	size_t count = 0;
6731 
6732 	spin_lock(&resource_alignment_lock);
6733 	if (resource_alignment_param)
6734 		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6735 	spin_unlock(&resource_alignment_lock);
6736 
6737 	return count;
6738 }
6739 
6740 static ssize_t resource_alignment_store(const struct bus_type *bus,
6741 					const char *buf, size_t count)
{
	char *param, *old, *end;

	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	param = kstrndup(buf, count, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	end = strchr(param, '\n');
	if (end)
		*end = '\0';

	spin_lock(&resource_alignment_lock);
	old = resource_alignment_param;
	if (strlen(param)) {
		resource_alignment_param = param;
	} else {
		kfree(param);
		resource_alignment_param = NULL;
	}
	spin_unlock(&resource_alignment_lock);

	kfree(old);

	return count;
}

static BUS_ATTR_RW(resource_alignment);
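
/*
 * Runtime usage sketch: BUS_ATTR_RW() above, together with the
 * bus_create_file() call below, exposes the parameter at
 * /sys/bus/pci/resource_alignment (the device address is hypothetical):
 *
 *   # echo "12@0000:00:1c.0" > /sys/bus/pci/resource_alignment
 *   # cat /sys/bus/pci/resource_alignment
 */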

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type, &bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static DEFINE_IDA(pci_domain_nr_static_ida);
static DEFINE_IDA(pci_domain_nr_dynamic_ida);

static void of_pci_reserve_static_domain_nr(void)
{
	struct device_node *np;
	int domain_nr;

	for_each_node_by_type(np, "pci") {
		domain_nr = of_get_pci_domain_nr(np);
		if (domain_nr < 0)
			continue;
		/*
		 * Permanently allocate domain_nr in dynamic_ida so it
		 * can never be handed out by dynamic allocation.
		 */
		ida_alloc_range(&pci_domain_nr_dynamic_ida,
				domain_nr, domain_nr, GFP_KERNEL);
	}
}
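
/*
 * Illustrative device-tree fragment (a sketch; the node name and values
 * are hypothetical).  A host-bridge node carrying "linux,pci-domain" is
 * what of_get_pci_domain_nr() above picks up as a static domain number:
 *
 *	pcie@10000000 {
 *		device_type = "pci";
 *		linux,pci-domain = <2>;
 *	};
 */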

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static bool static_domains_reserved;
	int domain_nr;

	/* On the first call, scan the device tree for static allocations. */
	if (!static_domains_reserved) {
		of_pci_reserve_static_domain_nr();
		static_domains_reserved = true;
	}

	if (parent) {
		/*
		 * If the domain is in DT, allocate it in the static IDA.
		 * This prevents duplicate static allocations in case of
		 * errors in DT.
		 */
		domain_nr = of_get_pci_domain_nr(parent->of_node);
		if (domain_nr >= 0)
			return ida_alloc_range(&pci_domain_nr_static_ida,
					       domain_nr, domain_nr,
					       GFP_KERNEL);
	}

	/*
	 * If the domain was not specified in DT, choose a free ID from the
	 * dynamic allocations.  All domain numbers from DT are permanently
	 * in the dynamic allocations to prevent them from being assigned
	 * to other DT nodes without a static domain.
	 */
	return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
}

static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr)
{
	if (domain_nr < 0)
		return;

	/* Release the domain from the IDA where it was allocated. */
	if (of_get_pci_domain_nr(parent->of_node) == domain_nr)
		ida_free(&pci_domain_nr_static_ida, domain_nr);
	else
		ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
}

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}

void pci_bus_release_domain_nr(struct device *parent, int domain_nr)
{
	if (!acpi_disabled)
		return;
	of_pci_bus_release_domain_nr(parent, domain_nr);
}
#endif

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}
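
/*
 * Since the default above is __weak, an architecture can override it by
 * providing a strong definition.  A minimal sketch for a hypothetical
 * platform whose config space ends at offset 0xff:
 *
 *	int pci_ext_cfg_avail(void)
 *	{
 *		return 0;
 *	}
 */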

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmmiosize=", 11)) {
				pci_hotplug_mmio_size = memparse(str + 11, &str);
			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mmio_size = memparse(str + 10, &str);
				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else if (!strncmp(str, "config_acs=", 11)) {
				config_acs_param = str + 11;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
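
/*
 * Example command line (values illustrative):
 *
 *   pci=nomsi,realloc=on,hpmemsize=8M,pcie_bus_safe
 *
 * Options are comma-separated; pci_setup() offers each token to
 * pcibios_setup() first, so an architecture may consume an option before
 * the generic parsing above sees it.
 */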

/*
 * 'resource_alignment_param', 'disable_acs_redir_param' and
 * 'config_acs_param' are initialized in pci_setup(), above, to point to
 * data in the __initdata section, which will be freed after the init
 * sequence is complete.  We can't allocate memory in pci_setup() because
 * some architectures do not have any memory allocation service available
 * during an early_param() call.  So we allocate memory and copy the
 * variables here before the init section is freed.
 */
static int __init pci_realloc_setup_params(void)
{
	resource_alignment_param = kstrdup(resource_alignment_param,
					   GFP_KERNEL);
	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
	config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);

	return 0;
}
pure_initcall(pci_realloc_setup_params);