• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * probe.c - PCI detection and setup code
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/of_device.h>
10 #include <linux/of_pci.h>
11 #include <linux/pci_hotplug.h>
12 #include <linux/slab.h>
13 #include <linux/module.h>
14 #include <linux/cpumask.h>
15 #include <linux/pci-aspm.h>
16 #include <linux/aer.h>
17 #include <linux/acpi.h>
18 #include <linux/irqdomain.h>
19 #include <linux/pm_runtime.h>
20 #include "pci.h"
21 
22 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
23 #define CARDBUS_RESERVE_BUSNR	3
24 
/* Default bus-number aperture ([bus 00-ff]) used for the default domain. */
static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};
31 
/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

/* Per-domain bus-number resources, allocated lazily by
 * get_pci_domain_busn_res(). */
static LIST_HEAD(pci_domain_busn_res_list);

/* Bus-number aperture for one PCI domain (segment). */
struct pci_domain_busn_res {
	struct list_head list;	/* link in pci_domain_busn_res_list */
	struct resource res;	/* IORESOURCE_BUS resource for this domain */
	int domain_nr;		/* PCI domain (segment) number */
};
43 
get_pci_domain_busn_res(int domain_nr)44 static struct resource *get_pci_domain_busn_res(int domain_nr)
45 {
46 	struct pci_domain_busn_res *r;
47 
48 	list_for_each_entry(r, &pci_domain_busn_res_list, list)
49 		if (r->domain_nr == domain_nr)
50 			return &r->res;
51 
52 	r = kzalloc(sizeof(*r), GFP_KERNEL);
53 	if (!r)
54 		return NULL;
55 
56 	r->domain_nr = domain_nr;
57 	r->res.start = 0;
58 	r->res.end = 0xff;
59 	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
60 
61 	list_add_tail(&r->list, &pci_domain_busn_res_list);
62 
63 	return &r->res;
64 }
65 
/* bus_find_device() match callback that accepts any device; used below
 * to test whether pci_bus_type has any devices at all. */
static int find_anything(struct device *dev, void *data)
{
	return 1;
}
70 
71 /*
72  * Some device drivers need know if pci is initiated.
73  * Basically, we think pci is not initiated when there
74  * is no device to be found on the pci_bus_type.
75  */
no_pci_devices(void)76 int no_pci_devices(void)
77 {
78 	struct device *dev;
79 	int no_devices;
80 
81 	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
82 	no_devices = (dev == NULL);
83 	put_device(dev);
84 	return no_devices;
85 }
86 EXPORT_SYMBOL(no_pci_devices);
87 
88 /*
89  * PCI Bus Class
90  */
/* Device-model release callback for a pci_bus device: runs when the
 * last reference to the bus device is dropped. */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	/* Drop the bridge reference taken in pci_alloc_child_bus(). */
	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}
100 
/* Sysfs class for PCI bus devices (class name "pci_bus"). */
static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};
106 
/* Register the pci_bus class; runs at postcore so buses can be created
 * before ordinary device initcalls. */
static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);
112 
/*
 * Compute the decode size of a BAR from the value read back after
 * writing all 1s (@maxbase), given the original value (@base) and the
 * mask of valid address bits (@mask).  Returns the extent (size - 1),
 * or 0 if the BAR is unimplemented or broken.
 */
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
130 
/*
 * Translate the low (type) bits of a raw BAR value into IORESOURCE_*
 * flags: I/O vs memory, prefetchable, and 32- vs 64-bit memory decode.
 * The raw low BAR bits are also preserved in the returned flags.
 */
static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}
163 
164 #define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
165 
166 /**
167  * pci_read_base - read a PCI BAR
168  * @dev: the PCI device
169  * @type: type of the BAR
170  * @res: resource buffer to be filled in
171  * @pos: BAR position in the config space
172  *
173  * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
174  */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	/* Non-zero @type means an expansion ROM BAR, whose low bits
	 * (including the enable bit) are not address bits. */
	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	/* Classic BAR sizing: save original, write all 1s, read back,
	 * restore the original value. */
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = PCI_ROM_ADDRESS_MASK;
	}

	/* A 64-bit memory BAR: size the upper dword the same way. */
	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	/* Sizing done; re-enable decoding if we turned it off above. */
	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	/* pci_size() returns the extent (size - 1), or 0 on error. */
	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	/* sz64 is an extent (size - 1), so the end here is inclusive. */
	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU.  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;


fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
319 
/*
 * Read the first @howmany BARs of @dev into dev->resource[], plus the
 * expansion ROM BAR at config offset @rom if @rom is non-zero.
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	/* Nothing to do if the device's BARs are flagged non-compliant. */
	if (dev->non_compliant_bars)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		/* __pci_read_base() returns 1 for a 64-bit BAR, which
		 * also consumes the next BAR slot -- skip over it. */
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}
341 
/*
 * Read the bridge's I/O window registers and, if the window is enabled
 * (base <= limit), fill in child->resource[0] accordingly.
 */
static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	/* A 32-bit I/O window carries the upper halves in extra registers. */
	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	/* base > limit means the window is disabled. */
	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
381 
/*
 * Read the bridge's non-prefetchable memory window and, if it is
 * enabled (base <= limit), fill in child->resource[1].
 */
static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	/* Registers hold address bits 31:20; the low 20 bits are implied. */
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;	/* limit covers a full 1MB unit */
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
403 
/*
 * Read the bridge's prefetchable memory window (optionally 64-bit) and,
 * if it is enabled and representable, fill in child->resource[2].
 */
static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	/* A 64-bit window keeps address bits 63:32 in separate registers. */
	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	/* Bail out if pci_bus_addr_t cannot represent the 64-bit base. */
	if (base != base64) {
		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;	/* limit covers a full 1MB unit */
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
456 
/*
 * Read the I/O, memory and prefetchable windows of the bridge leading
 * to @child and attach them as the child bus's resources.  For a
 * transparent bridge, also inherit the parent bus's resources as
 * subtractive-decode windows.
 */
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	/* Start from a clean slate; the windows are re-read below. */
	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}
490 
pci_alloc_bus(struct pci_bus * parent)491 static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
492 {
493 	struct pci_bus *b;
494 
495 	b = kzalloc(sizeof(*b), GFP_KERNEL);
496 	if (!b)
497 		return NULL;
498 
499 	INIT_LIST_HEAD(&b->node);
500 	INIT_LIST_HEAD(&b->children);
501 	INIT_LIST_HEAD(&b->devices);
502 	INIT_LIST_HEAD(&b->slots);
503 	INIT_LIST_HEAD(&b->resources);
504 	b->max_bus_speed = PCI_SPEED_UNKNOWN;
505 	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
506 #ifdef CONFIG_PCI_DOMAINS_GENERIC
507 	if (parent)
508 		b->domain_nr = parent->domain_nr;
509 #endif
510 	return b;
511 }
512 
/* Device-model release callback for a host bridge device. */
static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	/* Give the creator a chance to clean up its own state first. */
	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
524 
pci_alloc_host_bridge(struct pci_bus * b)525 static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
526 {
527 	struct pci_host_bridge *bridge;
528 
529 	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
530 	if (!bridge)
531 		return NULL;
532 
533 	INIT_LIST_HEAD(&bridge->windows);
534 	bridge->bus = b;
535 	return bridge;
536 }
537 
/* Map the 4-bit PCI-X secondary bus mode/frequency field to a bus
 * speed; indexed by (status & PCI_X_SSTATUS_FREQ) >> 6 in
 * pci_set_bus_speed(). */
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};
556 
/* Map the PCIe link speed field (Link Capabilities/Status) to a bus
 * speed; only encodings 1-3 (2.5/5/8 GT/s) are defined here. */
const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};
575 
/* Cache the negotiated link speed from a Link Status register value. */
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
581 
/* AGP rate encodings used by agp_speed(), indexed 0 (unknown) to 4 (8x). */
static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};
589 
agp_speed(int agp3,int agpstat)590 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
591 {
592 	int index = 0;
593 
594 	if (agpstat & 4)
595 		index = 3;
596 	else if (agpstat & 2)
597 		index = 2;
598 	else if (agpstat & 1)
599 		index = 1;
600 	else
601 		goto out;
602 
603 	if (agp3) {
604 		index += 2;
605 		if (index == 5)
606 			index = 0;
607 	}
608 
609  out:
610 	return agp_speeds[index];
611 }
612 
/*
 * Derive the max/current speed of @bus from its bridge's AGP, PCI-X,
 * or PCIe capability, whichever is present.
 */
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		/* Status gives the supported rates, command the current one. */
		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		/* The PCI_X_SSTATUS_FREQ field starts at bit 6. */
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
670 
/*
 * Find the MSI irq_domain associated with the host bridge of @bus,
 * trying OF first, then ACPI, then (optionally) a direct fwnode
 * lookup.  Returns NULL when no domain can be resolved.
 */
static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}
699 
/* Resolve and cache the MSI irq_domain for @bus in its struct device. */
static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	/* No bridge on the way up had a domain: fall back to the host bridge. */
	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}
720 
/*
 * Allocate and register a child bus of @parent with bus number @busnr.
 * @bridge is the bridge device leading to the new bus (may be NULL).
 * Returns the new bus, or NULL if the allocation fails.
 */
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	/* Subordinate defaults to the maximum; narrowed later via
	 * pci_bus_update_busn_res_end() once it is actually known. */
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	/* Reference dropped in release_pcibus_dev(). */
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	/* Give the bus ops a chance to hook up the new bus. */
	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}
791 
pci_add_new_bus(struct pci_bus * parent,struct pci_dev * dev,int busnr)792 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
793 				int busnr)
794 {
795 	struct pci_bus *child;
796 
797 	child = pci_alloc_child_bus(parent, dev, busnr);
798 	if (child) {
799 		down_write(&pci_bus_sem);
800 		list_add_tail(&child->node, &parent->children);
801 		up_write(&pci_bus_sem);
802 	}
803 	return child;
804 }
805 EXPORT_SYMBOL(pci_add_new_bus);
806 
/* Enable CRS Software Visibility on @pdev if the Root Capabilities
 * register advertises support for it. */
static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}
817 
818 /*
819  * If it's a bridge, configure it and scan the bus behind it.
820  * For CardBus bridges, we don't scan behind as the devices will
821  * be handled by the bridge driver itself.
822  *
823  * We need to process bridges in two passes -- first we scan those
824  * already configured by the BIOS and after we are done with all of
825  * them, we proceed to assigning numbers to the remaining buses in
826  * order to avoid overlaps between old and new bus numbers.
827  */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/*
	 * Make sure the bridge is powered on to be able to access config
	 * space of devices below it.
	 */
	pm_runtime_get_sync(&dev->dev);

	/* PCI_PRIMARY_BUS packs primary/secondary/subordinate in bytes 0-2. */
	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	pci_enable_crs(dev);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we are
		 * rescanning the bus or the bus is reachable through more than
		 * one bridge. The second case can happen with the i450NX
		 * chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);
		/* subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max+1,
						bus->busn_res.end);
		}
		max++;
		/* Repack primary/secondary/subordinate into the register image. */
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->busn_res.start)   <<  8)
		      | ((unsigned int)(child->busn_res.end) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus(child);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				/* Stop reserving once a number is taken. */
				if (pci_find_bus(pci_domain_nr(bus),
							max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
				&child->busn_res,
				(bus->number > child->busn_res.end &&
				 bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				bus->self->transparent ? " transparent" : "",
				dev_name(&bus->dev),
				&bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	/* Restore the bridge control value saved before probing. */
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	pm_runtime_put(&dev->dev);

	return max;
}
EXPORT_SYMBOL(pci_scan_bridge);
1029 
1030 /*
1031  * Read interrupt line and base address registers.
1032  * The architecture-dependent code can tweak these, of course.
1033  */
pci_read_irq(struct pci_dev * dev)1034 static void pci_read_irq(struct pci_dev *dev)
1035 {
1036 	unsigned char irq;
1037 
1038 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
1039 	dev->pin = irq;
1040 	if (irq)
1041 		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1042 	dev->irq = irq;
1043 }
1044 
set_pcie_port_type(struct pci_dev * pdev)1045 void set_pcie_port_type(struct pci_dev *pdev)
1046 {
1047 	int pos;
1048 	u16 reg16;
1049 	int type;
1050 	struct pci_dev *parent;
1051 
1052 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1053 	if (!pos)
1054 		return;
1055 
1056 	pdev->pcie_cap = pos;
1057 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
1058 	pdev->pcie_flags_reg = reg16;
1059 	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
1060 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
1061 
1062 	/*
1063 	 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
1064 	 * of a Link.  No PCIe component has two Links.  Two Links are
1065 	 * connected by a Switch that has a Port on each Link and internal
1066 	 * logic to connect the two Ports.
1067 	 */
1068 	type = pci_pcie_type(pdev);
1069 	if (type == PCI_EXP_TYPE_ROOT_PORT ||
1070 	    type == PCI_EXP_TYPE_PCIE_BRIDGE)
1071 		pdev->has_secondary_link = 1;
1072 	else if (type == PCI_EXP_TYPE_UPSTREAM ||
1073 		 type == PCI_EXP_TYPE_DOWNSTREAM) {
1074 		parent = pci_upstream_bridge(pdev);
1075 
1076 		/*
1077 		 * Usually there's an upstream device (Root Port or Switch
1078 		 * Downstream Port), but we can't assume one exists.
1079 		 */
1080 		if (parent && !parent->has_secondary_link)
1081 			pdev->has_secondary_link = 1;
1082 	}
1083 }
1084 
set_pcie_hotplug_bridge(struct pci_dev * pdev)1085 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
1086 {
1087 	u32 reg32;
1088 
1089 	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
1090 	if (reg32 & PCI_EXP_SLTCAP_HPC)
1091 		pdev->is_hotplug_bridge = 1;
1092 }
1093 
1094 /**
1095  * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1096  * @dev: PCI device
1097  *
1098  * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1099  * when forwarding a type1 configuration request the bridge must check that
1100  * the extended register address field is zero.  The bridge is not permitted
1101  * to forward the transactions and must handle it as an Unsupported Request.
1102  * Some bridges do not follow this rule and simply drop the extended register
1103  * bits, resulting in the standard config space being aliased, every 256
1104  * bytes across the entire configuration space.  Test for this condition by
1105  * comparing the first dword of each potential alias to the vendor/device ID.
1106  * Known offenders:
1107  *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1108  *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1109  */
pci_ext_cfg_is_aliased(struct pci_dev * dev)1110 static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1111 {
1112 #ifdef CONFIG_PCI_QUIRKS
1113 	int pos;
1114 	u32 header, tmp;
1115 
1116 	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1117 
1118 	for (pos = PCI_CFG_SPACE_SIZE;
1119 	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1120 		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1121 		    || header != tmp)
1122 			return false;
1123 	}
1124 
1125 	return true;
1126 #else
1127 	return false;
1128 #endif
1129 }
1130 
1131 /**
1132  * pci_cfg_space_size - get the configuration space size of the PCI device.
1133  * @dev: PCI device
1134  *
1135  * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1136  * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
1137  * access it.  Maybe we don't have a way to generate extended config space
1138  * accesses, or the device is behind a reverse Express bridge.  So we try
1139  * reading the dword at 0x100 which must either be 0 or a valid extended
1140  * capability header.
1141  */
pci_cfg_space_size_ext(struct pci_dev * dev)1142 static int pci_cfg_space_size_ext(struct pci_dev *dev)
1143 {
1144 	u32 status;
1145 	int pos = PCI_CFG_SPACE_SIZE;
1146 
1147 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1148 		return PCI_CFG_SPACE_SIZE;
1149 	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
1150 		return PCI_CFG_SPACE_SIZE;
1151 
1152 	return PCI_CFG_SPACE_EXP_SIZE;
1153 }
1154 
pci_cfg_space_size(struct pci_dev * dev)1155 int pci_cfg_space_size(struct pci_dev *dev)
1156 {
1157 	int pos;
1158 	u32 status;
1159 	u16 class;
1160 
1161 	class = dev->class >> 8;
1162 	if (class == PCI_CLASS_BRIDGE_HOST)
1163 		return pci_cfg_space_size_ext(dev);
1164 
1165 	if (pci_is_pcie(dev))
1166 		return pci_cfg_space_size_ext(dev);
1167 
1168 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1169 	if (!pos)
1170 		return PCI_CFG_SPACE_SIZE;
1171 
1172 	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1173 	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
1174 		return pci_cfg_space_size_ext(dev);
1175 
1176 	return PCI_CFG_SPACE_SIZE;
1177 }
1178 
1179 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1180 
pci_msi_setup_pci_dev(struct pci_dev * dev)1181 static void pci_msi_setup_pci_dev(struct pci_dev *dev)
1182 {
1183 	/*
1184 	 * Disable the MSI hardware to avoid screaming interrupts
1185 	 * during boot.  This is the power on reset default so
1186 	 * usually this should be a noop.
1187 	 */
1188 	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
1189 	if (dev->msi_cap)
1190 		pci_msi_set_enable(dev, 0);
1191 
1192 	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1193 	if (dev->msix_cap)
1194 		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1195 }
1196 
1197 /**
1198  * pci_setup_device - fill in class and map information of a device
1199  * @dev: the device structure to fill
1200  *
1201  * Initialize the device structure with information about the device's
1202  * vendor,class,memory and IO-space addresses,IRQ lines etc.
1203  * Called at initialisation of the PCI subsystem and by CardBus services.
1204  * Returns 0 on success and negative if unknown type of device (not normal,
1205  * bridge or CardBus).
1206  */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u16 cmd;
	u8 hdr_type;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	/* If even the header type can't be read, give up on the device */
	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;	  /* low 7 bits: header layout */
	dev->multifunction = !!(hdr_type & 0x80); /* bit 7: multi-function */
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);
	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	/* Canonical device name: domain:bus:slot.function */
	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	/* Quirk-flagged devices: stop the device decoding its bogus BARs */
	if (dev->non_compliant_bars) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
			cmd &= ~PCI_COMMAND_IO;
			cmd &= ~PCI_COMMAND_MEMORY;
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}

	/*
	 * Per-header-type probing: the BAR count, subsystem ID location,
	 * and bridge-specific fields all depend on the header layout.
	 */
	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			/* progif bit 0 clear: primary channel in legacy mode */
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			/* progif bit 2 clear: secondary channel in legacy mode */
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		/* Bridges keep subsystem IDs in the optional SSVID capability */
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		/* Class/header mismatch: keep the device but neutralize its class */
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED << 8;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}
1357 
pci_configure_mps(struct pci_dev * dev)1358 static void pci_configure_mps(struct pci_dev *dev)
1359 {
1360 	struct pci_dev *bridge = pci_upstream_bridge(dev);
1361 	int mps, p_mps, rc;
1362 
1363 	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
1364 		return;
1365 
1366 	mps = pcie_get_mps(dev);
1367 	p_mps = pcie_get_mps(bridge);
1368 
1369 	if (mps == p_mps)
1370 		return;
1371 
1372 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1373 		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1374 			 mps, pci_name(bridge), p_mps);
1375 		return;
1376 	}
1377 
1378 	/*
1379 	 * Fancier MPS configuration is done later by
1380 	 * pcie_bus_configure_settings()
1381 	 */
1382 	if (pcie_bus_config != PCIE_BUS_DEFAULT)
1383 		return;
1384 
1385 	rc = pcie_set_mps(dev, p_mps);
1386 	if (rc) {
1387 		dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1388 			 p_mps);
1389 		return;
1390 	}
1391 
1392 	dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
1393 		 p_mps, mps, 128 << dev->pcie_mpss);
1394 }
1395 
/*
 * Fallback Type 0 (PCI) hotplug parameters, used by program_hpp_type0()
 * when firmware supplies no record or one with an unsupported revision.
 */
static struct hpp_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};
1403 
program_hpp_type0(struct pci_dev * dev,struct hpp_type0 * hpp)1404 static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1405 {
1406 	u16 pci_cmd, pci_bctl;
1407 
1408 	if (!hpp)
1409 		hpp = &pci_default_type0;
1410 
1411 	if (hpp->revision > 1) {
1412 		dev_warn(&dev->dev,
1413 			 "PCI settings rev %d not supported; using defaults\n",
1414 			 hpp->revision);
1415 		hpp = &pci_default_type0;
1416 	}
1417 
1418 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1419 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1420 	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1421 	if (hpp->enable_serr)
1422 		pci_cmd |= PCI_COMMAND_SERR;
1423 	if (hpp->enable_perr)
1424 		pci_cmd |= PCI_COMMAND_PARITY;
1425 	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1426 
1427 	/* Program bridge control value */
1428 	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1429 		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1430 				      hpp->latency_timer);
1431 		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1432 		if (hpp->enable_serr)
1433 			pci_bctl |= PCI_BRIDGE_CTL_SERR;
1434 		if (hpp->enable_perr)
1435 			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
1436 		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1437 	}
1438 }
1439 
program_hpp_type1(struct pci_dev * dev,struct hpp_type1 * hpp)1440 static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1441 {
1442 	int pos;
1443 
1444 	if (!hpp)
1445 		return;
1446 
1447 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1448 	if (!pos)
1449 		return;
1450 
1451 	dev_warn(&dev->dev, "PCI-X settings not supported\n");
1452 }
1453 
pcie_root_rcb_set(struct pci_dev * dev)1454 static bool pcie_root_rcb_set(struct pci_dev *dev)
1455 {
1456 	struct pci_dev *rp = pcie_find_root_port(dev);
1457 	u16 lnkctl;
1458 
1459 	if (!rp)
1460 		return false;
1461 
1462 	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
1463 	if (lnkctl & PCI_EXP_LNKCTL_RCB)
1464 		return true;
1465 
1466 	return false;
1467 }
1468 
static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	/* Type 2 records only apply to PCIe devices */
	if (!hpp)
		return;

	if (!pci_is_pcie(dev))
		return;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/*
	 * Each AER register below gets the same read-modify-write
	 * treatment: keep the bits selected by the *_and mask, then turn
	 * on the bits in the *_or mask.
	 */

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}
1548 
pci_configure_device(struct pci_dev * dev)1549 static void pci_configure_device(struct pci_dev *dev)
1550 {
1551 	struct hotplug_params hpp;
1552 	int ret;
1553 
1554 	pci_configure_mps(dev);
1555 
1556 	memset(&hpp, 0, sizeof(hpp));
1557 	ret = pci_get_hp_params(dev, &hpp);
1558 	if (ret)
1559 		return;
1560 
1561 	program_hpp_type2(dev, hpp.t2);
1562 	program_hpp_type1(dev, hpp.t1);
1563 	program_hpp_type0(dev, hpp.t0);
1564 }
1565 
/*
 * Release the capability-related state attached to @dev: VPD, SR-IOV,
 * and the PCIe/PCI-X config-save buffers.  Called from pci_release_dev()
 * during final teardown.
 */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1572 
1573 /**
1574  * pci_release_dev - free a pci device structure when all users of it are finished.
1575  * @dev: device that's been disconnected
1576  *
1577  * Will be called only by the device core when all users of this pci device are
1578  * done.
1579  */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	/* Tear down in reverse of setup: caps, OF node, arch state */
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	/* Drop the bus reference taken in pci_alloc_dev() */
	pci_bus_put(pci_dev->bus);
	kfree(pci_dev->driver_override);
	kfree(pci_dev->dma_alias_mask);
	kfree(pci_dev);
}
1593 
pci_alloc_dev(struct pci_bus * bus)1594 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1595 {
1596 	struct pci_dev *dev;
1597 
1598 	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1599 	if (!dev)
1600 		return NULL;
1601 
1602 	INIT_LIST_HEAD(&dev->bus_list);
1603 	dev->dev.type = &pci_dev_type;
1604 	dev->bus = pci_bus_get(bus);
1605 
1606 	return dev;
1607 }
1608 EXPORT_SYMBOL(pci_alloc_dev);
1609 
pci_bus_read_dev_vendor_id(struct pci_bus * bus,int devfn,u32 * l,int crs_timeout)1610 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1611 				int crs_timeout)
1612 {
1613 	int delay = 1;
1614 
1615 	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1616 		return false;
1617 
1618 	/* some broken boards return 0 or ~0 if a slot is empty: */
1619 	if (*l == 0xffffffff || *l == 0x00000000 ||
1620 	    *l == 0x0000ffff || *l == 0xffff0000)
1621 		return false;
1622 
1623 	/*
1624 	 * Configuration Request Retry Status.  Some root ports return the
1625 	 * actual device ID instead of the synthetic ID (0xFFFF) required
1626 	 * by the PCIe spec.  Ignore the device ID and only check for
1627 	 * (vendor id == 1).
1628 	 */
1629 	while ((*l & 0xffff) == 0x0001) {
1630 		if (!crs_timeout)
1631 			return false;
1632 
1633 		msleep(delay);
1634 		delay *= 2;
1635 		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1636 			return false;
1637 		/* Card hasn't responded in 60 seconds?  Must be stuck. */
1638 		if (delay > crs_timeout) {
1639 			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
1640 			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
1641 			       PCI_FUNC(devfn));
1642 			return false;
1643 		}
1644 	}
1645 
1646 	return true;
1647 }
1648 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1649 
1650 /*
1651  * Read the config data for a PCI device, sanity-check it
1652  * and fill in the dev structure...
1653  */
pci_scan_device(struct pci_bus * bus,int devfn)1654 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1655 {
1656 	struct pci_dev *dev;
1657 	u32 l;
1658 
1659 	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1660 		return NULL;
1661 
1662 	dev = pci_alloc_dev(bus);
1663 	if (!dev)
1664 		return NULL;
1665 
1666 	dev->devfn = devfn;
1667 	dev->vendor = l & 0xffff;
1668 	dev->device = (l >> 16) & 0xffff;
1669 
1670 	pci_set_of_node(dev);
1671 
1672 	if (pci_setup_device(dev)) {
1673 		pci_bus_put(dev->bus);
1674 		kfree(dev);
1675 		return NULL;
1676 	}
1677 
1678 	return dev;
1679 }
1680 
/*
 * Probe and initialize the device's capabilities.  Called from
 * pci_device_add() before the device goes on the bus list.
 */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* Enhanced Allocation */
	pci_ea_init(dev);

	/* Setup MSI caps & disable MSI/MSI-X interrupts */
	pci_msi_setup_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Address Translation Services */
	pci_ats_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);

	/* Precision Time Measurement */
	pci_ptm_init(dev);

	/* Advanced Error Reporting */
	pci_aer_init(dev);
}
1716 
1717 /*
1718  * This is the equivalent of pci_host_bridge_msi_domain that acts on
1719  * devices. Firmware interfaces that can select the MSI domain on a
1720  * per-device basis should be called from here.
1721  */
pci_dev_msi_domain(struct pci_dev * dev)1722 static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
1723 {
1724 	struct irq_domain *d;
1725 
1726 	/*
1727 	 * If a domain has been set through the pcibios_add_device
1728 	 * callback, then this is the one (platform code knows best).
1729 	 */
1730 	d = dev_get_msi_domain(&dev->dev);
1731 	if (d)
1732 		return d;
1733 
1734 	/*
1735 	 * Let's see if we have a firmware interface able to provide
1736 	 * the domain.
1737 	 */
1738 	d = pci_msi_get_device_domain(dev);
1739 	if (d)
1740 		return d;
1741 
1742 	return NULL;
1743 }
1744 
pci_set_msi_domain(struct pci_dev * dev)1745 static void pci_set_msi_domain(struct pci_dev *dev)
1746 {
1747 	struct irq_domain *d;
1748 
1749 	/*
1750 	 * If the platform or firmware interfaces cannot supply a
1751 	 * device-specific MSI domain, then inherit the default domain
1752 	 * from the host bridge itself.
1753 	 */
1754 	d = pci_dev_msi_domain(dev);
1755 	if (!d)
1756 		d = dev_get_msi_domain(&dev->bus->dev);
1757 
1758 	dev_set_msi_domain(&dev->dev, d);
1759 }
1760 
1761 /**
1762  * pci_dma_configure - Setup DMA configuration
1763  * @dev: ptr to pci_dev struct of the PCI device
1764  *
1765  * Function to update PCI devices's DMA configuration using the same
1766  * info from the OF node or ACPI node of host bridge's parent (if any).
1767  */
pci_dma_configure(struct pci_dev * dev)1768 static void pci_dma_configure(struct pci_dev *dev)
1769 {
1770 	struct device *bridge = pci_get_host_bridge_device(dev);
1771 
1772 	if (IS_ENABLED(CONFIG_OF) &&
1773 		bridge->parent && bridge->parent->of_node) {
1774 			of_dma_configure(&dev->dev, bridge->parent->of_node);
1775 	} else if (has_acpi_companion(bridge)) {
1776 		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
1777 		enum dev_dma_attr attr = acpi_get_dma_attr(adev);
1778 
1779 		if (attr == DEV_DMA_NOT_SUPPORTED)
1780 			dev_warn(&dev->dev, "DMA not supported.\n");
1781 		else
1782 			arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
1783 					   attr == DEV_DMA_COHERENT);
1784 	}
1785 
1786 	pci_put_host_bridge_device(bridge);
1787 }
1788 
/*
 * Finish configuring @dev and register it with the driver core and on
 * @bus's device list.  The ordering below is significant: configuration
 * and capability init happen before the device becomes visible.
 */
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	pci_configure_device(dev);

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;
	pci_dma_configure(dev);

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Setup MSI irq domain */
	pci_set_msi_domain(dev);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}
1838 
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev = pci_get_slot(bus, devfn);

	if (dev) {
		/* Already scanned; drop the extra ref from pci_get_slot() */
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (dev)
		pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);
1858 
next_fn(struct pci_bus * bus,struct pci_dev * dev,unsigned fn)1859 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1860 {
1861 	int pos;
1862 	u16 cap = 0;
1863 	unsigned next_fn;
1864 
1865 	if (pci_ari_enabled(bus)) {
1866 		if (!dev)
1867 			return 0;
1868 		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1869 		if (!pos)
1870 			return 0;
1871 
1872 		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1873 		next_fn = PCI_ARI_CAP_NFN(cap);
1874 		if (next_fn <= fn)
1875 			return 0;	/* protect against malformed list */
1876 
1877 		return next_fn;
1878 	}
1879 
1880 	/* dev may be NULL for non-contiguous multifunction devices */
1881 	if (!dev || dev->multifunction)
1882 		return (fn + 1) % 8;
1883 
1884 	return 0;
1885 }
1886 
only_one_child(struct pci_bus * bus)1887 static int only_one_child(struct pci_bus *bus)
1888 {
1889 	struct pci_dev *parent = bus->self;
1890 
1891 	if (!parent || !pci_is_pcie(parent))
1892 		return 0;
1893 	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1894 		return 1;
1895 
1896 	/*
1897 	 * PCIe downstream ports are bridges that normally lead to only a
1898 	 * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
1899 	 * possible devices, not just device 0.  See PCIe spec r3.0,
1900 	 * sec 7.3.1.
1901 	 */
1902 	if (parent->has_secondary_link &&
1903 	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1904 		return 1;
1905 	return 0;
1906 }
1907 
1908 /**
1909  * pci_scan_slot - scan a PCI slot on a bus for devices.
1910  * @bus: PCI bus to scan
1911  * @devfn: slot number to scan (must have zero function.)
1912  *
1913  * Scan a PCI slot on the specified PCI bus for devices, adding
1914  * discovered devices to the @bus->devices list.  New devices
1915  * will not have is_added set.
1916  *
1917  * Returns the number of new devices found.
1918  */
pci_scan_slot(struct pci_bus * bus,int devfn)1919 int pci_scan_slot(struct pci_bus *bus, int devfn)
1920 {
1921 	unsigned fn, nr = 0;
1922 	struct pci_dev *dev;
1923 
1924 	if (only_one_child(bus) && (devfn > 0))
1925 		return 0; /* Already scanned the entire slot */
1926 
1927 	dev = pci_scan_single_device(bus, devfn);
1928 	if (!dev)
1929 		return 0;
1930 	if (!dev->is_added)
1931 		nr++;
1932 
1933 	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
1934 		dev = pci_scan_single_device(bus, devfn + fn);
1935 		if (dev) {
1936 			if (!dev->is_added)
1937 				nr++;
1938 			dev->multifunction = 1;
1939 		}
1940 	}
1941 
1942 	/* only one slot has pcie device */
1943 	if (bus->self && nr)
1944 		pcie_aspm_init_link_state(bus->self);
1945 
1946 	return nr;
1947 }
1948 EXPORT_SYMBOL(pci_scan_slot);
1949 
pcie_find_smpss(struct pci_dev * dev,void * data)1950 static int pcie_find_smpss(struct pci_dev *dev, void *data)
1951 {
1952 	u8 *smpss = data;
1953 
1954 	if (!pci_is_pcie(dev))
1955 		return 0;
1956 
1957 	/*
1958 	 * We don't have a way to change MPS settings on devices that have
1959 	 * drivers attached.  A hot-added device might support only the minimum
1960 	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
1961 	 * where devices may be hot-added, we limit the fabric MPS to 128 so
1962 	 * hot-added devices will work correctly.
1963 	 *
1964 	 * However, if we hot-add a device to a slot directly below a Root
1965 	 * Port, it's impossible for there to be other existing devices below
1966 	 * the port.  We don't limit the MPS in this case because we can
1967 	 * reconfigure MPS on both the Root Port and the hot-added device,
1968 	 * and there are no other devices involved.
1969 	 *
1970 	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
1971 	 */
1972 	if (dev->is_hotplug_bridge &&
1973 	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
1974 		*smpss = 0;
1975 
1976 	if (*smpss > dev->pcie_mpss)
1977 		*smpss = dev->pcie_mpss;
1978 
1979 	return 0;
1980 }
1981 
pcie_write_mps(struct pci_dev * dev,int mps)1982 static void pcie_write_mps(struct pci_dev *dev, int mps)
1983 {
1984 	int rc;
1985 
1986 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1987 		mps = 128 << dev->pcie_mpss;
1988 
1989 		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
1990 		    dev->bus->self)
1991 			/* For "Performance", the assumption is made that
1992 			 * downstream communication will never be larger than
1993 			 * the MRRS.  So, the MPS only needs to be configured
1994 			 * for the upstream communication.  This being the case,
1995 			 * walk from the top down and set the MPS of the child
1996 			 * to that of the parent bus.
1997 			 *
1998 			 * Configure the device MPS with the smaller of the
1999 			 * device MPSS or the bridge MPS (which is assumed to be
2000 			 * properly configured at this point to the largest
2001 			 * allowable MPS based on its parent bus).
2002 			 */
2003 			mps = min(mps, pcie_get_mps(dev->bus->self));
2004 	}
2005 
2006 	rc = pcie_set_mps(dev, mps);
2007 	if (rc)
2008 		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
2009 }
2010 
pcie_write_mrrs(struct pci_dev * dev)2011 static void pcie_write_mrrs(struct pci_dev *dev)
2012 {
2013 	int rc, mrrs;
2014 
2015 	/* In the "safe" case, do not configure the MRRS.  There appear to be
2016 	 * issues with setting MRRS to 0 on a number of devices.
2017 	 */
2018 	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2019 		return;
2020 
2021 	/* For Max performance, the MRRS must be set to the largest supported
2022 	 * value.  However, it cannot be configured larger than the MPS the
2023 	 * device or the bus can support.  This should already be properly
2024 	 * configured by a prior call to pcie_write_mps.
2025 	 */
2026 	mrrs = pcie_get_mps(dev);
2027 
2028 	/* MRRS is a R/W register.  Invalid values can be written, but a
2029 	 * subsequent read will verify if the value is acceptable or not.
2030 	 * If the MRRS value provided is not acceptable (e.g., too large),
2031 	 * shrink the value until it is acceptable to the HW.
2032 	 */
2033 	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2034 		rc = pcie_set_readrq(dev, mrrs);
2035 		if (!rc)
2036 			break;
2037 
2038 		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
2039 		mrrs /= 2;
2040 	}
2041 
2042 	if (mrrs < 128)
2043 		dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
2044 }
2045 
pcie_bus_configure_set(struct pci_dev * dev,void * data)2046 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2047 {
2048 	int mps, orig_mps;
2049 
2050 	if (!pci_is_pcie(dev))
2051 		return 0;
2052 
2053 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
2054 	    pcie_bus_config == PCIE_BUS_DEFAULT)
2055 		return 0;
2056 
2057 	mps = 128 << *(u8 *)data;
2058 	orig_mps = pcie_get_mps(dev);
2059 
2060 	pcie_write_mps(dev, mps);
2061 	pcie_write_mrrs(dev);
2062 
2063 	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2064 		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
2065 		 orig_mps, pcie_get_readrq(dev));
2066 
2067 	return 0;
2068 }
2069 
2070 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
2071  * parents then children fashion.  If this changes, then this code will not
2072  * work as designed.
2073  */
pcie_bus_configure_settings(struct pci_bus * bus)2074 void pcie_bus_configure_settings(struct pci_bus *bus)
2075 {
2076 	u8 smpss = 0;
2077 
2078 	if (!bus->self)
2079 		return;
2080 
2081 	if (!pci_is_pcie(bus->self))
2082 		return;
2083 
2084 	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
2085 	 * to be aware of the MPS of the destination.  To work around this,
2086 	 * simply force the MPS of the entire system to the smallest possible.
2087 	 */
2088 	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2089 		smpss = 0;
2090 
2091 	if (pcie_bus_config == PCIE_BUS_SAFE) {
2092 		smpss = bus->self->pcie_mpss;
2093 
2094 		pcie_find_smpss(bus->self, &smpss);
2095 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
2096 	}
2097 
2098 	pcie_bus_configure_set(bus->self, &smpss);
2099 	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
2100 }
2101 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
2102 
/*
 * pci_scan_child_bus - scan a bus for devices and recurse into bridges
 * @bus: bus to scan
 *
 * Scans every slot on @bus, reserves SR-IOV bus numbers, runs the arch
 * fixups once per bus, then walks all bridges found in two passes.
 * Returns the highest subordinate bus number discovered.
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	/* Two passes over the bridges; pass number is handed to
	 * pci_scan_bridge(), which decides what to do on each pass.
	 */
	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (pci_is_bridge(dev))
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * Make sure a hotplug bridge has at least the minimum requested
	 * number of buses.
	 */
	if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
		if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
			max = bus->busn_res.start + pci_hotplug_bus_size - 1;

		/* Do not allocate more buses than we have room left */
		if (max > bus->busn_res.end)
			max = bus->busn_res.end;
	}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2157 
2158 /**
2159  * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
2160  * @bridge: Host bridge to set up.
2161  *
2162  * Default empty implementation.  Replace with an architecture-specific setup
2163  * routine, if necessary.
2164  */
pcibios_root_bridge_prepare(struct pci_host_bridge * bridge)2165 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
2166 {
2167 	return 0;
2168 }
2169 
/* Arch hook invoked after a bus device is registered; default is a no-op. */
void __weak pcibios_add_bus(struct pci_bus *bus)
{
}
2173 
/* Arch hook invoked on bus removal; default is a no-op. */
void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
2177 
/*
 * pci_create_root_bus - allocate and register a PCI root bus
 * @parent: parent device of the host bridge (may be NULL)
 * @bus: starting bus number for the new root bus
 * @ops: config-space access operations for the bus
 * @sysdata: opaque per-host data stored in the bus
 * @resources: list of resource_entry windows; consumed (moved onto the
 *             host bridge's window list) on success
 *
 * Registers a host bridge device and the root bus device below it, then
 * attaches each window resource to the bus.  Returns the new bus, or NULL
 * if the bus already exists or any registration step fails.
 */
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct resource_entry *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;

	b = pci_alloc_bus(NULL);
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b->number = b->busn_res.start = bus;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	b->domain_nr = pci_bus_find_domain_nr(b, parent);
#endif
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_host_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	/* Give the platform a chance to veto or adjust the bridge. */
	error = pcibios_root_bridge_prepare(bridge);
	if (error) {
		/* Not yet registered, so plain kfree is the right teardown. */
		kfree(bridge);
		goto err_out;
	}

	error = device_register(&bridge->dev);
	if (error) {
		/* device_register() failure still requires a put_device(). */
		put_device(&bridge->dev);
		goto err_out;
	}
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);
	pci_set_bus_msi_domain(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	pcibios_add_bus(b);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	resource_list_for_each_entry_safe(window, n, resources) {
		list_move_tail(&window->node, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			/* CPU address differs from bus address; log both. */
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	/* NOTE(review): put_device() immediately followed by
	 * device_unregister() drops two references to the bridge while only
	 * the get_device() above took an extra one -- verify the refcount
	 * balance on this error path.
	 */
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
err_out:
	kfree(b);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
2286 
pci_bus_insert_busn_res(struct pci_bus * b,int bus,int bus_max)2287 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2288 {
2289 	struct resource *res = &b->busn_res;
2290 	struct resource *parent_res, *conflict;
2291 
2292 	res->start = bus;
2293 	res->end = bus_max;
2294 	res->flags = IORESOURCE_BUS;
2295 
2296 	if (!pci_is_root_bus(b))
2297 		parent_res = &b->parent->busn_res;
2298 	else {
2299 		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2300 		res->flags |= IORESOURCE_PCI_FIXED;
2301 	}
2302 
2303 	conflict = request_resource_conflict(parent_res, res);
2304 
2305 	if (conflict)
2306 		dev_printk(KERN_DEBUG, &b->dev,
2307 			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
2308 			    res, pci_is_root_bus(b) ? "domain " : "",
2309 			    parent_res, conflict->name, conflict);
2310 
2311 	return conflict == NULL;
2312 }
2313 
pci_bus_update_busn_res_end(struct pci_bus * b,int bus_max)2314 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2315 {
2316 	struct resource *res = &b->busn_res;
2317 	struct resource old_res = *res;
2318 	resource_size_t size;
2319 	int ret;
2320 
2321 	if (res->start > bus_max)
2322 		return -EINVAL;
2323 
2324 	size = bus_max - res->start + 1;
2325 	ret = adjust_resource(res, res->start, size);
2326 	dev_printk(KERN_DEBUG, &b->dev,
2327 			"busn_res: %pR end %s updated to %02x\n",
2328 			&old_res, ret ? "can not be" : "is", bus_max);
2329 
2330 	if (!ret && !res->parent)
2331 		pci_bus_insert_busn_res(b, res->start, res->end);
2332 
2333 	return ret;
2334 }
2335 
pci_bus_release_busn_res(struct pci_bus * b)2336 void pci_bus_release_busn_res(struct pci_bus *b)
2337 {
2338 	struct resource *res = &b->busn_res;
2339 	int ret;
2340 
2341 	if (!res->flags || !res->parent)
2342 		return;
2343 
2344 	ret = release_resource(res);
2345 	dev_printk(KERN_DEBUG, &b->dev,
2346 			"busn_res: %pR %s released\n",
2347 			res, ret ? "can not be" : "is");
2348 }
2349 
pci_scan_root_bus_msi(struct device * parent,int bus,struct pci_ops * ops,void * sysdata,struct list_head * resources,struct msi_controller * msi)2350 struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
2351 		struct pci_ops *ops, void *sysdata,
2352 		struct list_head *resources, struct msi_controller *msi)
2353 {
2354 	struct resource_entry *window;
2355 	bool found = false;
2356 	struct pci_bus *b;
2357 	int max;
2358 
2359 	resource_list_for_each_entry(window, resources)
2360 		if (window->res->flags & IORESOURCE_BUS) {
2361 			found = true;
2362 			break;
2363 		}
2364 
2365 	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
2366 	if (!b)
2367 		return NULL;
2368 
2369 	b->msi = msi;
2370 
2371 	if (!found) {
2372 		dev_info(&b->dev,
2373 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2374 			bus);
2375 		pci_bus_insert_busn_res(b, bus, 255);
2376 	}
2377 
2378 	max = pci_scan_child_bus(b);
2379 
2380 	if (!found)
2381 		pci_bus_update_busn_res_end(b, max);
2382 
2383 	return b;
2384 }
2385 
/*
 * pci_scan_root_bus - create and scan a root bus with no MSI controller
 *
 * Thin wrapper around pci_scan_root_bus_msi() passing msi == NULL.
 */
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
				     NULL);
}
EXPORT_SYMBOL(pci_scan_root_bus);
2393 
pci_scan_bus(int bus,struct pci_ops * ops,void * sysdata)2394 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2395 					void *sysdata)
2396 {
2397 	LIST_HEAD(resources);
2398 	struct pci_bus *b;
2399 
2400 	pci_add_resource(&resources, &ioport_resource);
2401 	pci_add_resource(&resources, &iomem_resource);
2402 	pci_add_resource(&resources, &busn_resource);
2403 	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2404 	if (b) {
2405 		pci_scan_child_bus(b);
2406 	} else {
2407 		pci_free_resource_list(&resources);
2408 	}
2409 	return b;
2410 }
2411 EXPORT_SYMBOL(pci_scan_bus);
2412 
2413 /**
2414  * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2415  * @bridge: PCI bridge for the bus to scan
2416  *
2417  * Scan a PCI bus and child buses for new devices, add them,
2418  * and enable them, resizing bridge mmio/io resource if necessary
2419  * and possible.  The caller must ensure the child devices are already
2420  * removed for resizing to occur.
2421  *
2422  * Returns the max number of subordinate bus discovered.
2423  */
pci_rescan_bus_bridge_resize(struct pci_dev * bridge)2424 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2425 {
2426 	unsigned int max;
2427 	struct pci_bus *bus = bridge->subordinate;
2428 
2429 	max = pci_scan_child_bus(bus);
2430 
2431 	pci_assign_unassigned_bridge_resources(bridge);
2432 
2433 	pci_bus_add_devices(bus);
2434 
2435 	return max;
2436 }
2437 
2438 /**
2439  * pci_rescan_bus - scan a PCI bus for devices.
2440  * @bus: PCI bus to scan
2441  *
2442  * Scan a PCI bus and child buses for new devices, adds them,
2443  * and enables them.
2444  *
2445  * Returns the max number of subordinate bus discovered.
2446  */
pci_rescan_bus(struct pci_bus * bus)2447 unsigned int pci_rescan_bus(struct pci_bus *bus)
2448 {
2449 	unsigned int max;
2450 
2451 	max = pci_scan_child_bus(bus);
2452 	pci_assign_unassigned_bus_resources(bus);
2453 	pci_bus_add_devices(bus);
2454 
2455 	return max;
2456 }
2457 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2458 
2459 /*
2460  * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2461  * routines should always be executed under this mutex.
2462  */
2463 static DEFINE_MUTEX(pci_rescan_remove_lock);
2464 
pci_lock_rescan_remove(void)2465 void pci_lock_rescan_remove(void)
2466 {
2467 	mutex_lock(&pci_rescan_remove_lock);
2468 }
2469 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2470 
pci_unlock_rescan_remove(void)2471 void pci_unlock_rescan_remove(void)
2472 {
2473 	mutex_unlock(&pci_rescan_remove_lock);
2474 }
2475 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2476 
pci_sort_bf_cmp(const struct device * d_a,const struct device * d_b)2477 static int __init pci_sort_bf_cmp(const struct device *d_a,
2478 				  const struct device *d_b)
2479 {
2480 	const struct pci_dev *a = to_pci_dev(d_a);
2481 	const struct pci_dev *b = to_pci_dev(d_b);
2482 
2483 	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2484 	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
2485 
2486 	if      (a->bus->number < b->bus->number) return -1;
2487 	else if (a->bus->number > b->bus->number) return  1;
2488 
2489 	if      (a->devfn < b->devfn) return -1;
2490 	else if (a->devfn > b->devfn) return  1;
2491 
2492 	return 0;
2493 }
2494 
/* Re-sort all PCI devices on the bus type in breadth-first (domain, bus,
 * devfn) order using pci_sort_bf_cmp().
 */
void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}
2499