// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2014 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <inttypes.h>
#include <pci.h>
#include <asm/io.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
#include <asm/fsp/fsp_support.h>
#endif
#include "pci_internal.h"

DECLARE_GLOBAL_DATA_PTR;

int pci_get_bus(int busnum, struct udevice **busp)
{
	int ret;

	ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);

	/* Since buses may not be numbered yet try a little harder with bus 0 */
	if (ret == -ENODEV) {
		ret = uclass_first_device_err(UCLASS_PCI, busp);
		if (ret)
			return ret;
		ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);
	}

	return ret;
}

struct udevice *pci_get_controller(struct udevice *dev)
{
	while (device_is_on_pci_bus(dev))
		dev = dev->parent;

	return dev;
}

pci_dev_t dm_pci_get_bdf(struct udevice *dev)
{
	struct pci_child_platdata *pplat = dev_get_parent_platdata(dev);
	struct udevice *bus = dev->parent;

	return PCI_ADD_BUS(bus->seq, pplat->devfn);
}

/**
 * pci_get_bus_max() - returns the bus number of the last active bus
 *
 * @return last bus number, or -1 if no active buses
 */
static int pci_get_bus_max(void)
{
	struct udevice *bus;
	struct uclass *uc;
	int ret = -1;

	ret = uclass_get(UCLASS_PCI, &uc);
	uclass_foreach_dev(bus, uc) {
		if (bus->seq > ret)
			ret = bus->seq;
	}

	debug("%s: ret=%d\n", __func__, ret);

	return ret;
}

int pci_last_busno(void)
{
	return pci_get_bus_max();
}

int pci_get_ff(enum pci_size_t size)
{
	switch (size) {
	case PCI_SIZE_8:
		return 0xff;
	case PCI_SIZE_16:
		return 0xffff;
	default:
		return 0xffffffff;
	}
}

int pci_bus_find_devfn(struct udevice *bus, pci_dev_t find_devfn,
		       struct udevice **devp)
{
	struct udevice *dev;

	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		struct pci_child_platdata *pplat;

		pplat = dev_get_parent_platdata(dev);
		if (pplat && pplat->devfn == find_devfn) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp)
{
	struct udevice *bus;
	int ret;

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;
	return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp);
}

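/**
 * pci_device_matches_ids() - Check whether a device matches a list of IDs
 *
 * @dev:	PCI device to check
 * @ids:	Table of vendor/device ID pairs, terminated by an entry with a
 *		zero vendor ID
 * @return index of the first matching entry, or -EINVAL if there is no match
 *	   or the device has no parent platform data
 */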
static int pci_device_matches_ids(struct udevice *dev,
				  struct pci_device_id *ids)
{
	struct pci_child_platdata *pplat;
	int i;

	pplat = dev_get_parent_platdata(dev);
	if (!pplat)
		return -EINVAL;
	for (i = 0; ids[i].vendor != 0; i++) {
		if (pplat->vendor == ids[i].vendor &&
		    pplat->device == ids[i].device)
			return i;
	}

	return -EINVAL;
}

int pci_bus_find_devices(struct udevice *bus, struct pci_device_id *ids,
			 int *indexp, struct udevice **devp)
{
	struct udevice *dev;

	/* Scan all devices on this bus */
	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		if (pci_device_matches_ids(dev, ids) >= 0) {
			if ((*indexp)-- <= 0) {
				*devp = dev;
				return 0;
			}
		}
	}

	return -ENODEV;
}

int pci_find_device_id(struct pci_device_id *ids, int index,
		       struct udevice **devp)
{
	struct udevice *bus;

	/* Scan all known buses */
	for (uclass_first_device(UCLASS_PCI, &bus);
	     bus;
	     uclass_next_device(&bus)) {
		if (!pci_bus_find_devices(bus, ids, &index, devp))
			return 0;
	}
	*devp = NULL;

	return -ENODEV;
}

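/**
 * dm_pci_bus_find_device() - Find a device on a bus by vendor/device ID
 *
 * @bus:	Bus to search
 * @vendor:	Vendor ID to look for
 * @device:	Device ID to look for
 * @indexp:	Index of the match to return; decremented for each match found
 * @devp:	Returns the device, if found
 * @return 0 if found, -ENODEV if not
 */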
static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor,
				  unsigned int device, int *indexp,
				  struct udevice **devp)
{
	struct pci_child_platdata *pplat;
	struct udevice *dev;

	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		pplat = dev_get_parent_platdata(dev);
		if (pplat->vendor == vendor && pplat->device == device) {
			if (!(*indexp)--) {
				*devp = dev;
				return 0;
			}
		}
	}

	return -ENODEV;
}

int dm_pci_find_device(unsigned int vendor, unsigned int device, int index,
		       struct udevice **devp)
{
	struct udevice *bus;

	/* Scan all known buses */
	for (uclass_first_device(UCLASS_PCI, &bus);
	     bus;
	     uclass_next_device(&bus)) {
		if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp))
			return device_probe(*devp);
	}
	*devp = NULL;

	return -ENODEV;
}

int dm_pci_find_class(uint find_class, int index, struct udevice **devp)
{
	struct udevice *dev;

	/* Scan all known buses */
	for (pci_find_first_device(&dev);
	     dev;
	     pci_find_next_device(&dev)) {
		struct pci_child_platdata *pplat = dev_get_parent_platdata(dev);

		if (pplat->class == find_class && !index--) {
			*devp = dev;
			return device_probe(*devp);
		}
	}
	*devp = NULL;

	return -ENODEV;
}

int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset,
			 unsigned long value, enum pci_size_t size)
{
	struct dm_pci_ops *ops;

	ops = pci_get_ops(bus);
	if (!ops->write_config)
		return -ENOSYS;
	return ops->write_config(bus, bdf, offset, value, size);
}

int pci_bus_clrset_config32(struct udevice *bus, pci_dev_t bdf, int offset,
			    u32 clr, u32 set)
{
	ulong val;
	int ret;

	ret = pci_bus_read_config(bus, bdf, offset, &val, PCI_SIZE_32);
	if (ret)
		return ret;
	val &= ~clr;
	val |= set;

	return pci_bus_write_config(bus, bdf, offset, val, PCI_SIZE_32);
}

int pci_write_config(pci_dev_t bdf, int offset, unsigned long value,
		     enum pci_size_t size)
{
	struct udevice *bus;
	int ret;

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	return pci_bus_write_config(bus, bdf, offset, value, size);
}

int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value,
			enum pci_size_t size)
{
	struct udevice *bus;

	for (bus = dev; device_is_on_pci_bus(bus);)
		bus = bus->parent;
	return pci_bus_write_config(bus, dm_pci_get_bdf(dev), offset, value,
				    size);
}

int pci_write_config32(pci_dev_t bdf, int offset, u32 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_32);
}

int pci_write_config16(pci_dev_t bdf, int offset, u16 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_16);
}

int pci_write_config8(pci_dev_t bdf, int offset, u8 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_8);
}

int dm_pci_write_config8(struct udevice *dev, int offset, u8 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_8);
}

int dm_pci_write_config16(struct udevice *dev, int offset, u16 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_16);
}

int dm_pci_write_config32(struct udevice *dev, int offset, u32 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_32);
}

int pci_bus_read_config(struct udevice *bus, pci_dev_t bdf, int offset,
			unsigned long *valuep, enum pci_size_t size)
{
	struct dm_pci_ops *ops;

	ops = pci_get_ops(bus);
	if (!ops->read_config)
		return -ENOSYS;
	return ops->read_config(bus, bdf, offset, valuep, size);
}

int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep,
		    enum pci_size_t size)
{
	struct udevice *bus;
	int ret;

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	return pci_bus_read_config(bus, bdf, offset, valuep, size);
}

int dm_pci_read_config(struct udevice *dev, int offset, unsigned long *valuep,
		       enum pci_size_t size)
{
	struct udevice *bus;

	for (bus = dev; device_is_on_pci_bus(bus);)
		bus = bus->parent;
	return pci_bus_read_config(bus, dm_pci_get_bdf(dev), offset, valuep,
				   size);
}

int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config8(struct udevice *dev, int offset, u8 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config16(struct udevice *dev, int offset, u16 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config32(struct udevice *dev, int offset, u32 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_clrset_config8(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u8 val;
	int ret;

	ret = dm_pci_read_config8(dev, offset, &val);
	if (ret)
		return ret;
	val &= ~clr;
	val |= set;

	return dm_pci_write_config8(dev, offset, val);
}

int dm_pci_clrset_config16(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u16 val;
	int ret;

	ret = dm_pci_read_config16(dev, offset, &val);
	if (ret)
		return ret;
	val &= ~clr;
	val |= set;

	return dm_pci_write_config16(dev, offset, val);
}

int dm_pci_clrset_config32(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u32 val;
	int ret;

	ret = dm_pci_read_config32(dev, offset, &val);
	if (ret)
		return ret;
	val &= ~clr;
	val |= set;

	return dm_pci_write_config32(dev, offset, val);
}

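/**
 * set_vga_bridge_bits() - Enable VGA forwarding on bridges above a device
 *
 * Set PCI_BRIDGE_CTL_VGA in the bridge control register of each bridge
 * between @dev and the root bus, so that legacy VGA cycles are forwarded
 * to the device.
 *
 * @dev:	VGA device
 */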
static void set_vga_bridge_bits(struct udevice *dev)
{
	struct udevice *parent = dev->parent;
	u16 bc;

	while (parent->seq != 0) {
		dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc);
		bc |= PCI_BRIDGE_CTL_VGA;
		dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc);
		parent = parent->parent;
	}
}

int pci_auto_config_devices(struct udevice *bus)
{
	struct pci_controller *hose = bus->uclass_priv;
	struct pci_child_platdata *pplat;
	unsigned int sub_bus;
	struct udevice *dev;
	int ret;

	sub_bus = bus->seq;
	debug("%s: start\n", __func__);
	pciauto_config_init(hose);
	for (ret = device_find_first_child(bus, &dev);
	     !ret && dev;
	     ret = device_find_next_child(&dev)) {
		unsigned int max_bus;
		int ret;

		debug("%s: device %s\n", __func__, dev->name);
		ret = dm_pciauto_config_device(dev);
		if (ret < 0)
			return ret;
		max_bus = ret;
		sub_bus = max(sub_bus, max_bus);

		pplat = dev_get_parent_platdata(dev);
		if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8))
			set_vga_bridge_bits(dev);
	}
	debug("%s: done\n", __func__);

	return sub_bus;
}

int pci_generic_mmap_write_config(
	struct udevice *bus,
	int (*addr_f)(struct udevice *bus, pci_dev_t bdf, uint offset, void **addrp),
	pci_dev_t bdf,
	uint offset,
	ulong value,
	enum pci_size_t size)
{
	void *address;

	if (addr_f(bus, bdf, offset, &address) < 0)
		return 0;

	switch (size) {
	case PCI_SIZE_8:
		writeb(value, address);
		return 0;
	case PCI_SIZE_16:
		writew(value, address);
		return 0;
	case PCI_SIZE_32:
		writel(value, address);
		return 0;
	default:
		return -EINVAL;
	}
}

int pci_generic_mmap_read_config(
	struct udevice *bus,
	int (*addr_f)(struct udevice *bus, pci_dev_t bdf, uint offset, void **addrp),
	pci_dev_t bdf,
	uint offset,
	ulong *valuep,
	enum pci_size_t size)
{
	void *address;

	if (addr_f(bus, bdf, offset, &address) < 0) {
		*valuep = pci_get_ff(size);
		return 0;
	}

	switch (size) {
	case PCI_SIZE_8:
		*valuep = readb(address);
		return 0;
	case PCI_SIZE_16:
		*valuep = readw(address);
		return 0;
	case PCI_SIZE_32:
		*valuep = readl(address);
		return 0;
	default:
		return -EINVAL;
	}
}

int dm_pci_hose_probe_bus(struct udevice *bus)
{
	int sub_bus;
	int ret;

	debug("%s\n", __func__);

	sub_bus = pci_get_bus_max() + 1;
	debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name);
	dm_pciauto_prescan_setup_bridge(bus, sub_bus);

	ret = device_probe(bus);
	if (ret) {
		debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name,
		      ret);
		return ret;
	}
	if (sub_bus != bus->seq) {
		printf("%s: Internal error, bus '%s' got seq %d, expected %d\n",
		       __func__, bus->name, bus->seq, sub_bus);
		return -EPIPE;
	}
	sub_bus = pci_get_bus_max();
	dm_pciauto_postscan_setup_bridge(bus, sub_bus);

	return sub_bus;
}

/**
 * pci_match_one_id() - Tell whether a PCI device matches a device ID entry
 *
 * @id: single PCI device ID structure to match against
 * @find: the PCI device ID structure of the device being checked
 *
 * Returns true if @find matches @id, or false if there is no match.
 */
static bool pci_match_one_id(const struct pci_device_id *id,
			     const struct pci_device_id *find)
{
	if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) &&
	    (id->device == PCI_ANY_ID || id->device == find->device) &&
	    (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) &&
	    (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) &&
	    !((id->class ^ find->class) & id->class_mask))
		return true;

	return false;
}

/**
 * pci_find_and_bind_driver() - Find and bind the right PCI driver
 *
 * This only looks at certain fields in the descriptor.
 *
 * @parent: Parent bus
 * @find_id: Specification of the driver to find
 * @bdf: Bus/device/function address - see PCI_BDF()
 * @devp: Returns a pointer to the device created
 * @return 0 if OK, -EPERM if the device is not needed before relocation and
 *	   therefore was not created, other -ve value on error
 */
static int pci_find_and_bind_driver(struct udevice *parent,
				    struct pci_device_id *find_id,
				    pci_dev_t bdf, struct udevice **devp)
{
	struct pci_driver_entry *start, *entry;
	const char *drv;
	int n_ents;
	int ret;
	char name[30], *str;
	bool bridge;

	*devp = NULL;

	debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__,
	      find_id->vendor, find_id->device);
	start = ll_entry_start(struct pci_driver_entry, pci_driver_entry);
	n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry);
	for (entry = start; entry != start + n_ents; entry++) {
		const struct pci_device_id *id;
		struct udevice *dev;
		const struct driver *drv;

		for (id = entry->match;
		     id->vendor || id->subvendor || id->class_mask;
		     id++) {
			if (!pci_match_one_id(id, find_id))
				continue;

			drv = entry->driver;

			/*
			 * In the pre-relocation phase, we only bind devices
			 * whose driver has the DM_FLAG_PRE_RELOC flag set, to
			 * save precious memory space, as on some platforms
			 * that space is pretty limited (e.g. when using Cache
			 * As RAM).
			 */
			if (!(gd->flags & GD_FLG_RELOC) &&
			    !(drv->flags & DM_FLAG_PRE_RELOC))
				return -EPERM;

			/*
			 * We could pass the descriptor to the driver as
			 * platdata (instead of NULL) and allow its bind()
			 * method to return -ENOENT if it doesn't support this
			 * device. That way we could continue the search to
			 * find another driver. For now this doesn't seem
			 * necessary, so just bind the first match.
			 */
			ret = device_bind(parent, drv, drv->name, NULL, -1,
					  &dev);
			if (ret)
				goto error;
			debug("%s: Match found: %s\n", __func__, drv->name);
			dev->driver_data = find_id->driver_data;
			*devp = dev;
			return 0;
		}
	}

	bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI;
	/*
	 * In the pre-relocation phase, we only bind bridge devices to save
	 * precious memory space, as on some platforms that space is pretty
	 * limited (e.g. when using Cache As RAM).
	 */
	if (!(gd->flags & GD_FLG_RELOC) && !bridge)
		return -EPERM;

	/* Bind a generic driver so that the device can be used */
	sprintf(name, "pci_%x:%x.%x", parent->seq, PCI_DEV(bdf),
		PCI_FUNC(bdf));
	str = strdup(name);
	if (!str)
		return -ENOMEM;
	drv = bridge ? "pci_bridge_drv" : "pci_generic_drv";

	ret = device_bind_driver(parent, drv, str, devp);
	if (ret) {
		debug("%s: Failed to bind generic driver: %d\n", __func__, ret);
		free(str);
		return ret;
	}
	debug("%s: No match found: bound generic driver instead\n", __func__);

	return 0;

error:
	debug("%s: No match found: error %d\n", __func__, ret);
	return ret;
}

int pci_bind_bus_devices(struct udevice *bus)
{
	ulong vendor, device;
	ulong header_type;
	pci_dev_t bdf, end;
	bool found_multi;
	int ret;

	found_multi = false;
	end = PCI_BDF(bus->seq, PCI_MAX_PCI_DEVICES - 1,
		      PCI_MAX_PCI_FUNCTIONS - 1);
	for (bdf = PCI_BDF(bus->seq, 0, 0); bdf <= end;
	     bdf += PCI_BDF(0, 0, 1)) {
		struct pci_child_platdata *pplat;
		struct udevice *dev;
		ulong class;

		if (PCI_FUNC(bdf) && !found_multi)
			continue;
		/* Check only the first access, we don't expect problems */
		ret = pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE,
					  &header_type, PCI_SIZE_8);
		if (ret)
			goto error;
		pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor,
				    PCI_SIZE_16);
		if (vendor == 0xffff || vendor == 0x0000)
			continue;

		if (!PCI_FUNC(bdf))
			found_multi = header_type & 0x80;

		debug("%s: bus %d/%s: found device %x, function %d\n", __func__,
		      bus->seq, bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));
		pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device,
				    PCI_SIZE_16);
		pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class,
				    PCI_SIZE_32);
		class >>= 8;

		/* Find this device in the device tree */
		ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);

		/* If nothing in the device tree, bind a device */
		if (ret == -ENODEV) {
			struct pci_device_id find_id;
			ulong val;

			memset(&find_id, '\0', sizeof(find_id));
			find_id.vendor = vendor;
			find_id.device = device;
			find_id.class = class;
			if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) {
				pci_bus_read_config(bus, bdf,
						    PCI_SUBSYSTEM_VENDOR_ID,
						    &val, PCI_SIZE_32);
				find_id.subvendor = val & 0xffff;
				find_id.subdevice = val >> 16;
			}
			ret = pci_find_and_bind_driver(bus, &find_id, bdf,
						       &dev);
		}
		if (ret == -EPERM)
			continue;
		else if (ret)
			return ret;

		/* Update the platform data */
		pplat = dev_get_parent_platdata(dev);
		pplat->devfn = PCI_MASK_BUS(bdf);
		pplat->vendor = vendor;
		pplat->device = device;
		pplat->class = class;
	}

	return 0;
error:
	printf("Cannot read bus configuration: %d\n", ret);

	return ret;
}

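/**
 * decode_regions() - Decode the controller's "ranges" property into regions
 *
 * Parse the device-tree "ranges" property of the host bridge and set up the
 * I/O, memory and prefetchable regions in @hose, then append a region (or
 * regions) covering local system memory.
 *
 * @hose:	PCI controller to set up
 * @parent_node: Parent node of the controller, used for its address cells
 * @node:	Controller node containing the "ranges" property
 */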
static void decode_regions(struct pci_controller *hose, ofnode parent_node,
			   ofnode node)
{
	int pci_addr_cells, addr_cells, size_cells;
	int cells_per_record;
	const u32 *prop;
	int len;
	int i;

	prop = ofnode_get_property(node, "ranges", &len);
	if (!prop) {
		debug("%s: Cannot decode regions\n", __func__);
		return;
	}

	pci_addr_cells = ofnode_read_simple_addr_cells(node);
	addr_cells = ofnode_read_simple_addr_cells(parent_node);
	size_cells = ofnode_read_simple_size_cells(node);

	/* PCI addresses are always 3-cells */
	len /= sizeof(u32);
	cells_per_record = pci_addr_cells + addr_cells + size_cells;
	hose->region_count = 0;
	debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
	      cells_per_record);
	for (i = 0; i < MAX_PCI_REGIONS; i++, len -= cells_per_record) {
		u64 pci_addr, addr, size;
		int space_code;
		u32 flags;
		int type;
		int pos;

		if (len < cells_per_record)
			break;
		flags = fdt32_to_cpu(prop[0]);
		space_code = (flags >> 24) & 3;
		pci_addr = fdtdec_get_number(prop + 1, 2);
		prop += pci_addr_cells;
		addr = fdtdec_get_number(prop, addr_cells);
		prop += addr_cells;
		size = fdtdec_get_number(prop, size_cells);
		prop += size_cells;
		debug("%s: region %d, pci_addr=%" PRIx64 ", addr=%" PRIx64
		      ", size=%" PRIx64 ", space_code=%d\n", __func__,
		      hose->region_count, pci_addr, addr, size, space_code);
		if (space_code & 2) {
			type = flags & (1U << 30) ? PCI_REGION_PREFETCH :
					PCI_REGION_MEM;
		} else if (space_code & 1) {
			type = PCI_REGION_IO;
		} else {
			continue;
		}

		if (!IS_ENABLED(CONFIG_SYS_PCI_64BIT) &&
		    type == PCI_REGION_MEM && upper_32_bits(pci_addr)) {
			debug(" - beyond the 32-bit boundary, ignoring\n");
			continue;
		}

		pos = -1;
		for (i = 0; i < hose->region_count; i++) {
			if (hose->regions[i].flags == type)
				pos = i;
		}
		if (pos == -1)
			pos = hose->region_count++;
		debug(" - type=%d, pos=%d\n", type, pos);
		pci_set_region(hose->regions + pos, pci_addr, addr, size, type);
	}

	/* Add a region for our local memory */
#ifdef CONFIG_NR_DRAM_BANKS
	bd_t *bd = gd->bd;

	if (!bd)
		return;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; ++i) {
		if (bd->bi_dram[i].size) {
			pci_set_region(hose->regions + hose->region_count++,
				       bd->bi_dram[i].start,
				       bd->bi_dram[i].start,
				       bd->bi_dram[i].size,
				       PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
		}
	}
#else
	phys_addr_t base = 0, size;

	size = gd->ram_size;
#ifdef CONFIG_SYS_SDRAM_BASE
	base = CONFIG_SYS_SDRAM_BASE;
#endif
	if (gd->pci_ram_top && gd->pci_ram_top < base + size)
		size = gd->pci_ram_top - base;
	if (size)
		pci_set_region(hose->regions + hose->region_count++, base,
			       base, size,
			       PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
#endif

	return;
}

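/**
 * pci_uclass_pre_probe() - Set up a PCI bus before it is probed
 *
 * A root bus decodes its "ranges" property and acts as its own controller;
 * a bridge takes its controller from the parent bus. In both cases the bus
 * range is initialised to the bus's own sequence number.
 *
 * @bus:	Bus to set up
 * @return 0 (always)
 */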
static int pci_uclass_pre_probe(struct udevice *bus)
{
	struct pci_controller *hose;

	debug("%s, bus=%d/%s, parent=%s\n", __func__, bus->seq, bus->name,
	      bus->parent->name);
	hose = bus->uclass_priv;

	/* For bridges, use the top-level PCI controller */
	if (!device_is_on_pci_bus(bus)) {
		hose->ctlr = bus;
		decode_regions(hose, dev_ofnode(bus->parent), dev_ofnode(bus));
	} else {
		struct pci_controller *parent_hose;

		parent_hose = dev_get_uclass_priv(bus->parent);
		hose->ctlr = parent_hose->bus;
	}
	hose->bus = bus;
	hose->first_busno = bus->seq;
	hose->last_busno = bus->seq;

	return 0;
}

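/**
 * pci_uclass_post_probe() - Scan a bus after it has been probed
 *
 * Bind the devices found on the bus and, if CONFIG_PCI_PNP is enabled,
 * auto-configure them. On x86 with FSP this also notifies the FSP once the
 * root bus has been enumerated after relocation.
 *
 * @bus:	Bus that was probed
 * @return 0 if OK, -ve on error
 */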
static int pci_uclass_post_probe(struct udevice *bus)
{
	int ret;

	debug("%s: probing bus %d\n", __func__, bus->seq);
	ret = pci_bind_bus_devices(bus);
	if (ret)
		return ret;

#ifdef CONFIG_PCI_PNP
	ret = pci_auto_config_devices(bus);
	if (ret < 0)
		return ret;
#endif

#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
	/*
	 * Per the Intel FSP specification, we should call the FSP notify API
	 * to inform the FSP that PCI enumeration has been done, so that the
	 * FSP will do any necessary initialization as required by the
	 * chipset's BIOS Writer's Guide (BWG).
	 *
	 * Unfortunately we have to put this call here because with driver
	 * model the enumeration is all done lazily as needed, so until
	 * something touches PCI it won't happen.
	 *
	 * Note we only call this 1) after U-Boot is relocated, and 2) once
	 * the root bus has finished probing.
	 */
	if ((gd->flags & GD_FLG_RELOC) && (bus->seq == 0)) {
		ret = fsp_init_phase_pci();
		if (ret)
			return ret;
	}
#endif

	return 0;
}

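/**
 * pci_uclass_child_post_bind() - Set up a child device after it is bound
 *
 * Read the "reg" property from the device-tree node, if there is one, and
 * record the device/function number in the child's parent platform data.
 *
 * @dev:	Device that was bound
 * @return 0 if OK, -EINVAL if the "reg" property is invalid
 */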
static int pci_uclass_child_post_bind(struct udevice *dev)
{
	struct pci_child_platdata *pplat;
	struct fdt_pci_addr addr;
	int ret;

	if (!dev_of_valid(dev))
		return 0;

	/*
	 * We could read vendor, device, class if available. But for now we
	 * just check the address.
	 */
	pplat = dev_get_parent_platdata(dev);
	ret = ofnode_read_pci_addr(dev_ofnode(dev), FDT_PCI_SPACE_CONFIG, "reg",
				   &addr);

	if (ret) {
		if (ret != -ENOENT)
			return -EINVAL;
	} else {
		/* extract the devfn from fdt_pci_addr */
		pplat->devfn = addr.phys_hi & 0xff00;
	}

	return 0;
}

static int pci_bridge_read_config(struct udevice *bus, pci_dev_t bdf,
				  uint offset, ulong *valuep,
				  enum pci_size_t size)
{
	struct pci_controller *hose = bus->uclass_priv;

	return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size);
}

static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf,
				   uint offset, ulong value,
				   enum pci_size_t size)
{
	struct pci_controller *hose = bus->uclass_priv;

	return pci_bus_write_config(hose->ctlr, bdf, offset, value, size);
}

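/**
 * skip_to_next_device() - Find the first device under a bus, or a later bus
 *
 * Starting at @bus, return its first child if it has one; otherwise move on
 * to the next bus in the uclass until a device is found or the buses run
 * out.
 *
 * @bus:	Bus to start from
 * @devp:	Returns the device found, if any
 * @return 0 if OK (including when no device is found), -ve on error
 */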
static int skip_to_next_device(struct udevice *bus, struct udevice **devp)
{
	struct udevice *dev;
	int ret = 0;

	/*
	 * Scan through all the PCI controllers. On x86 there will only be one
	 * but that is not necessarily true on other hardware.
	 */
	do {
		device_find_first_child(bus, &dev);
		if (dev) {
			*devp = dev;
			return 0;
		}
		ret = uclass_next_device(&bus);
		if (ret)
			return ret;
	} while (bus);

	return 0;
}

int pci_find_next_device(struct udevice **devp)
{
	struct udevice *child = *devp;
	struct udevice *bus = child->parent;
	int ret;

	/* First try all the siblings */
	*devp = NULL;
	while (child) {
		device_find_next_child(&child);
		if (child) {
			*devp = child;
			return 0;
		}
	}

	/* We ran out of siblings. Try the next bus */
	ret = uclass_next_device(&bus);
	if (ret)
		return ret;

	return bus ? skip_to_next_device(bus, devp) : 0;
}

int pci_find_first_device(struct udevice **devp)
{
	struct udevice *bus;
	int ret;

	*devp = NULL;
	ret = uclass_first_device(UCLASS_PCI, &bus);
	if (ret)
		return ret;

	return skip_to_next_device(bus, devp);
}

ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size)
{
	switch (size) {
	case PCI_SIZE_8:
		return (value >> ((offset & 3) * 8)) & 0xff;
	case PCI_SIZE_16:
		return (value >> ((offset & 2) * 8)) & 0xffff;
	default:
		return value;
	}
}

ulong pci_conv_size_to_32(ulong old, ulong value, uint offset,
			  enum pci_size_t size)
{
	uint off_mask;
	uint val_mask, shift;
	ulong ldata, mask;

	switch (size) {
	case PCI_SIZE_8:
		off_mask = 3;
		val_mask = 0xff;
		break;
	case PCI_SIZE_16:
		off_mask = 2;
		val_mask = 0xffff;
		break;
	default:
		return value;
	}
	shift = (offset & off_mask) * 8;
	ldata = (value & val_mask) << shift;
	mask = val_mask << shift;
	value = (old & ~mask) | ldata;

	return value;
}

int pci_get_regions(struct udevice *dev, struct pci_region **iop,
		    struct pci_region **memp, struct pci_region **prefp)
{
	struct udevice *bus = pci_get_controller(dev);
	struct pci_controller *hose = dev_get_uclass_priv(bus);
	int i;

	*iop = NULL;
	*memp = NULL;
	*prefp = NULL;
	for (i = 0; i < hose->region_count; i++) {
		switch (hose->regions[i].flags) {
		case PCI_REGION_IO:
			if (!*iop || (*iop)->size < hose->regions[i].size)
				*iop = hose->regions + i;
			break;
		case PCI_REGION_MEM:
			if (!*memp || (*memp)->size < hose->regions[i].size)
				*memp = hose->regions + i;
			break;
		case (PCI_REGION_MEM | PCI_REGION_PREFETCH):
			if (!*prefp || (*prefp)->size < hose->regions[i].size)
				*prefp = hose->regions + i;
			break;
		}
	}

	return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL);
}

u32 dm_pci_read_bar32(struct udevice *dev, int barnum)
{
	u32 addr;
	int bar;

	bar = PCI_BASE_ADDRESS_0 + barnum * 4;
	dm_pci_read_config32(dev, bar, &addr);
	if (addr & PCI_BASE_ADDRESS_SPACE_IO)
		return addr & PCI_BASE_ADDRESS_IO_MASK;
	else
		return addr & PCI_BASE_ADDRESS_MEM_MASK;
}

void dm_pci_write_bar32(struct udevice *dev, int barnum, u32 addr)
{
	int bar;

	bar = PCI_BASE_ADDRESS_0 + barnum * 4;
	dm_pci_write_config32(dev, bar, addr);
}

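/**
 * _dm_pci_bus_to_phys() - Convert a PCI bus address to a physical address
 *
 * Look for a region of the controller whose type matches @flags (and is not
 * excluded by @skip_mask) and which contains @bus_addr, then translate the
 * address into the CPU's view.
 *
 * @ctlr:	Host controller owning the regions
 * @bus_addr:	Bus address to convert
 * @flags:	Region type to match (PCI_REGION_...)
 * @skip_mask:	Region flags to skip
 * @pa:		Returns the physical address
 * @return 0 if the address was converted, 1 if no matching region was found
 */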
static int _dm_pci_bus_to_phys(struct udevice *ctlr,
			       pci_addr_t bus_addr, unsigned long flags,
			       unsigned long skip_mask, phys_addr_t *pa)
{
	struct pci_controller *hose = dev_get_uclass_priv(ctlr);
	struct pci_region *res;
	int i;

	if (hose->region_count == 0) {
		*pa = bus_addr;
		return 0;
	}

	for (i = 0; i < hose->region_count; i++) {
		res = &hose->regions[i];

		if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0)
			continue;

		if (res->flags & skip_mask)
			continue;

		if (bus_addr >= res->bus_start &&
		    (bus_addr - res->bus_start) < res->size) {
			*pa = (bus_addr - res->bus_start + res->phys_start);
			return 0;
		}
	}

	return 1;
}

phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr,
			       unsigned long flags)
{
	phys_addr_t phys_addr = 0;
	struct udevice *ctlr;
	int ret;

	/* The root controller has the region information */
	ctlr = pci_get_controller(dev);

	/*
	 * if PCI_REGION_MEM is set we do a two pass search with preference
	 * on matches that don't have PCI_REGION_SYS_MEMORY set
	 */
	if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) {
		ret = _dm_pci_bus_to_phys(ctlr, bus_addr,
					  flags, PCI_REGION_SYS_MEMORY,
					  &phys_addr);
		if (!ret)
			return phys_addr;
	}

	ret = _dm_pci_bus_to_phys(ctlr, bus_addr, flags, 0, &phys_addr);

	if (ret)
		puts("pci_hose_bus_to_phys: invalid physical address\n");

	return phys_addr;
}

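/**
 * _dm_pci_phys_to_bus() - Convert a physical address to a PCI bus address
 *
 * The reverse of _dm_pci_bus_to_phys(): find a region of the device's
 * controller that matches @flags, is not excluded by @skip_mask and contains
 * @phys_addr, then translate the address into the bus's view.
 *
 * @dev:	Device to convert for
 * @phys_addr:	Physical address to convert
 * @flags:	Region type to match (PCI_REGION_...)
 * @skip_mask:	Region flags to skip
 * @ba:		Returns the bus address
 * @return 0 if the address was converted, 1 if no matching region was found
 */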
int _dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
			unsigned long flags, unsigned long skip_mask,
			pci_addr_t *ba)
{
	struct pci_region *res;
	struct udevice *ctlr;
	pci_addr_t bus_addr;
	int i;
	struct pci_controller *hose;

	/* The root controller has the region information */
	ctlr = pci_get_controller(dev);
	hose = dev_get_uclass_priv(ctlr);

	if (hose->region_count == 0) {
		*ba = phys_addr;
		return 0;
	}

	for (i = 0; i < hose->region_count; i++) {
		res = &hose->regions[i];

		if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0)
			continue;

		if (res->flags & skip_mask)
			continue;

		bus_addr = phys_addr - res->phys_start + res->bus_start;

		if (bus_addr >= res->bus_start &&
		    (bus_addr - res->bus_start) < res->size) {
			*ba = bus_addr;
			return 0;
		}
	}

	return 1;
}

pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
			      unsigned long flags)
{
	pci_addr_t bus_addr = 0;
	int ret;

	/*
	 * if PCI_REGION_MEM is set we do a two pass search with preference
	 * on matches that don't have PCI_REGION_SYS_MEMORY set
	 */
	if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) {
		ret = _dm_pci_phys_to_bus(dev, phys_addr, flags,
					  PCI_REGION_SYS_MEMORY, &bus_addr);
		if (!ret)
			return bus_addr;
	}

	ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, 0, &bus_addr);

	if (ret)
		puts("pci_hose_phys_to_bus: invalid physical address\n");

	return bus_addr;
}

void *dm_pci_map_bar(struct udevice *dev, int bar, int flags)
{
	pci_addr_t pci_bus_addr;
	u32 bar_response;

	/* read BAR address */
	dm_pci_read_config32(dev, bar, &bar_response);
	pci_bus_addr = (pci_addr_t)(bar_response & ~0xf);

	/*
	 * Pass "0" as the length argument to pci_bus_to_virt. The arg
	 * isn't actually used on any platform because U-Boot assumes a
	 * static linear mapping. In the future, this could read the BAR size
	 * and pass that as the size if needed.
	 */
	return dm_pci_bus_to_virt(dev, pci_bus_addr, flags, 0, MAP_NOCACHE);
}

UCLASS_DRIVER(pci) = {
	.id		= UCLASS_PCI,
	.name		= "pci",
	.flags		= DM_UC_FLAG_SEQ_ALIAS,
	.post_bind	= dm_scan_fdt_dev,
	.pre_probe	= pci_uclass_pre_probe,
	.post_probe	= pci_uclass_post_probe,
	.child_post_bind = pci_uclass_child_post_bind,
	.per_device_auto_alloc_size = sizeof(struct pci_controller),
	.per_child_platdata_auto_alloc_size =
			sizeof(struct pci_child_platdata),
};

static const struct dm_pci_ops pci_bridge_ops = {
	.read_config	= pci_bridge_read_config,
	.write_config	= pci_bridge_write_config,
};

static const struct udevice_id pci_bridge_ids[] = {
	{ .compatible = "pci-bridge" },
	{ }
};

U_BOOT_DRIVER(pci_bridge_drv) = {
	.name		= "pci_bridge_drv",
	.id		= UCLASS_PCI,
	.of_match	= pci_bridge_ids,
	.ops		= &pci_bridge_ops,
};

UCLASS_DRIVER(pci_generic) = {
	.id		= UCLASS_PCI_GENERIC,
	.name		= "pci_generic",
};

static const struct udevice_id pci_generic_ids[] = {
	{ .compatible = "pci-generic" },
	{ }
};

U_BOOT_DRIVER(pci_generic_drv) = {
	.name		= "pci_generic_drv",
	.id		= UCLASS_PCI_GENERIC,
	.of_match	= pci_generic_ids,
};

void pci_init(void)
{
	struct udevice *bus;

	/*
	 * Enumerate all known controller devices. Enumeration has the side-
	 * effect of probing them, so PCIe devices will be enumerated too.
	 */
	for (uclass_first_device(UCLASS_PCI, &bus);
	     bus;
	     uclass_next_device(&bus)) {
		;
	}
}