• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * PCI / PCI-X / PCI-Express support for 4xx parts
3  *
4  * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
5  *
6  * Most PCI Express code is coming from Stefan Roese implementation for
7  * arch/ppc in the Denx tree, slightly reworked by me.
8  *
9  * Copyright 2007 DENX Software Engineering, Stefan Roese <sr@denx.de>
10  *
11  * Some of that comes itself from a previous implementation for 440SPE only
12  * by Roland Dreier:
13  *
14  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
15  * Roland Dreier <rolandd@cisco.com>
16  *
17  */
18 
19 #undef DEBUG
20 
21 #include <linux/kernel.h>
22 #include <linux/pci.h>
23 #include <linux/init.h>
24 #include <linux/of.h>
25 #include <linux/bootmem.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 
29 #include <asm/io.h>
30 #include <asm/pci-bridge.h>
31 #include <asm/machdep.h>
32 #include <asm/dcr.h>
33 #include <asm/dcr-regs.h>
34 #include <mm/mmu_decl.h>
35 
36 #include "ppc4xx_pci.h"
37 
/* Set once the first bridge's dma-ranges has been parsed; all bridges
 * must then agree on the same global pci_dram_offset. */
static int dma_offset_set;

/* Split a u64 into the low/high 32-bit halves used by address registers */
#define U64_TO_U32_LOW(val)	((u32)((val) & 0x00000000ffffffffULL))
#define U64_TO_U32_HIGH(val)	((u32)((val) >> 32))

/* Same split for resource_size_t, which may be only 32 bits wide; the
 * high half is then always 0 */
#define RES_TO_U32_LOW(val)	\
	((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val))
#define RES_TO_U32_HIGH(val)	\
	((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0))
47 
ppc440spe_revA(void)48 static inline int ppc440spe_revA(void)
49 {
50 	/* Catch both 440SPe variants, with and without RAID6 support */
51         if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890)
52                 return 1;
53         else
54                 return 0;
55 }
56 
fixup_ppc4xx_pci_bridge(struct pci_dev * dev)57 static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
58 {
59 	struct pci_controller *hose;
60 	int i;
61 
62 	if (dev->devfn != 0 || dev->bus->self != NULL)
63 		return;
64 
65 	hose = pci_bus_to_host(dev->bus);
66 	if (hose == NULL)
67 		return;
68 
69 	if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") &&
70 	    !of_device_is_compatible(hose->dn, "ibm,plb-pcix") &&
71 	    !of_device_is_compatible(hose->dn, "ibm,plb-pci"))
72 		return;
73 
74 	if (of_device_is_compatible(hose->dn, "ibm,plb440epx-pci") ||
75 		of_device_is_compatible(hose->dn, "ibm,plb440grx-pci")) {
76 		hose->indirect_type |= PPC_INDIRECT_TYPE_BROKEN_MRM;
77 	}
78 
79 	/* Hide the PCI host BARs from the kernel as their content doesn't
80 	 * fit well in the resource management
81 	 */
82 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
83 		dev->resource[i].start = dev->resource[i].end = 0;
84 		dev->resource[i].flags = 0;
85 	}
86 
87 	printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n",
88 	       pci_name(dev));
89 }
90 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge);
91 
/* Parse the "dma-ranges" property of the bridge node and derive the
 * single inbound (PCI -> PLB) memory window in @res.  Also establishes
 * the one global DMA offset (pci_dram_offset) shared by all 4xx bridges.
 * @reg is unused here; kept for signature symmetry with the callers.
 *
 * Returns 0 on success, -ENXIO when the property is inconsistent with
 * what this code supports.  With no property at all, a default 2GB
 * prefetchable window at PCI address 0 is used.
 */
static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
					  void __iomem *reg,
					  struct resource *res)
{
	u64 size;
	const u32 *ranges;
	int rlen;
	int pna = of_n_addr_cells(hose->dn);
	int np = pna + 5;	/* cells per dma-ranges entry */

	/* Default */
	res->start = 0;
	size = 0x80000000;
	res->end = size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;

	/* Get dma-ranges property */
	ranges = of_get_property(hose->dn, "dma-ranges", &rlen);
	if (ranges == NULL)
		goto out;

	/* Walk it */
	while ((rlen -= np * 4) >= 0) {
		u32 pci_space = ranges[0];
		u64 pci_addr = of_read_number(ranges + 1, 2);
		u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3);
		size = of_read_number(ranges + pna + 3, 2);
		ranges += np;
		if (cpu_addr == OF_BAD_ADDR || size == 0)
			continue;

		/* We only care about memory */
		if ((pci_space & 0x03000000) != 0x02000000)
			continue;

		/* We currently only support memory at 0, and pci_addr
		 * within 32 bits space
		 */
		if (cpu_addr != 0 || pci_addr > 0xffffffff) {
			printk(KERN_WARNING "%s: Ignored unsupported dma range"
			       " 0x%016llx...0x%016llx -> 0x%016llx\n",
			       hose->dn->full_name,
			       pci_addr, pci_addr + size - 1, cpu_addr);
			continue;
		}

		/* Check if not prefetchable */
		if (!(pci_space & 0x40000000))
			res->flags &= ~IORESOURCE_PREFETCH;


		/* Use that */
		res->start = pci_addr;
		/* Beware of 32 bits resources */
		if (sizeof(resource_size_t) == sizeof(u32) &&
		    (pci_addr + size) > 0x100000000ull)
			res->end = 0xffffffff;
		else
			res->end = res->start + size - 1;
		break;
	}

	/* We only support one global DMA offset */
	if (dma_offset_set && pci_dram_offset != res->start) {
		printk(KERN_ERR "%s: dma-ranges(s) mismatch\n",
		       hose->dn->full_name);
		return -ENXIO;
	}

	/* Check that we can fit all of memory as we don't support
	 * DMA bounce buffers
	 */
	if (size < total_memory) {
		printk(KERN_ERR "%s: dma-ranges too small "
		       "(size=%llx total_memory=%llx)\n",
		       hose->dn->full_name, size, (u64)total_memory);
		return -ENXIO;
	}

	/* Check we are a power of 2 size and that base is a multiple of size*/
	if ((size & (size - 1)) != 0  ||
	    (res->start & (size - 1)) != 0) {
		printk(KERN_ERR "%s: dma-ranges unaligned\n",
		       hose->dn->full_name);
		return -ENXIO;
	}

	/* Check that we are fully contained within 32 bits space if we are not
	 * running on a 460sx or 476fpe which have 64 bit bus addresses.
	 */
	if (res->end > 0xffffffff &&
	    !(of_device_is_compatible(hose->dn, "ibm,plb-pciex-460sx")
	      || of_device_is_compatible(hose->dn, "ibm,plb-pciex-476fpe"))) {
		printk(KERN_ERR "%s: dma-ranges outside of 32 bits space\n",
		       hose->dn->full_name);
		return -ENXIO;
	}
 out:
	/* Publish the (possibly default) window globally and on the hose */
	dma_offset_set = 1;
	pci_dram_offset = res->start;
	hose->dma_window_base_cur = res->start;
	hose->dma_window_size = resource_size(res);

	printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
	       pci_dram_offset);
	printk(KERN_INFO "4xx PCI DMA window base to 0x%016llx\n",
	       (unsigned long long)hose->dma_window_base_cur);
	printk(KERN_INFO "DMA window size 0x%016llx\n",
	       (unsigned long long)hose->dma_window_size);
	return 0;
}
203 
204 /*
205  * 4xx PCI 2.x part
206  */
207 
/* Program one PCI 2.x outbound window (PMM @index): PLB address, PCI
 * address and mask/attribute register.  Returns 0 on success, -1 when
 * the range cannot be mapped by this cell. */
static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller	*hose,
					   void __iomem			*reg,
					   u64				plb_addr,
					   u64				pci_addr,
					   u64				size,
					   unsigned int			flags,
					   int				index)
{
	u32 ma, pcila, pciha;

	/* Hack warning ! The "old" PCI 2.x cell only let us configure the low
	 * 32-bit of incoming PLB addresses. The top 4 bits of the 36-bit
	 * address are actually hard wired to a value that appears to depend
	 * on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx.
	 *
	 * The trick here is we just crop those top bits and ignore them when
	 * programming the chip. That means the device-tree has to be right
	 * for the specific part used (we don't print a warning if it's wrong
	 * but on the other hand, you'll crash quickly enough), but at least
	 * this code should work whatever the hard coded value is
	 */
	plb_addr &= 0xffffffffull;

	/* Note: Due to the above hack, the test below doesn't actually test
	 * if you address is above 4G, but it tests that address and
	 * (address + size) are both contained in the same 4G
	 */
	if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
	    size < 0x1000 || (plb_addr & (size - 1)) != 0) {
		printk(KERN_WARNING "%s: Resource out of range\n",
		       hose->dn->full_name);
		return -1;
	}
	/* Size mask with bit 0 as the window enable */
	ma = (0xffffffffu << ilog2(size)) | 1;
	if (flags & IORESOURCE_PREFETCH)
		ma |= 2;

	pciha = RES_TO_U32_HIGH(pci_addr);
	pcila = RES_TO_U32_LOW(pci_addr);

	/* Each PMM register set is 0x10 bytes apart */
	writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index));
	writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index));
	writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index));
	writel(ma, reg + PCIL0_PMM0MA + (0x10 * index));

	return 0;
}
255 
ppc4xx_configure_pci_PMMs(struct pci_controller * hose,void __iomem * reg)256 static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
257 					     void __iomem *reg)
258 {
259 	int i, j, found_isa_hole = 0;
260 
261 	/* Setup outbound memory windows */
262 	for (i = j = 0; i < 3; i++) {
263 		struct resource *res = &hose->mem_resources[i];
264 		resource_size_t offset = hose->mem_offset[i];
265 
266 		/* we only care about memory windows */
267 		if (!(res->flags & IORESOURCE_MEM))
268 			continue;
269 		if (j > 2) {
270 			printk(KERN_WARNING "%s: Too many ranges\n",
271 			       hose->dn->full_name);
272 			break;
273 		}
274 
275 		/* Configure the resource */
276 		if (ppc4xx_setup_one_pci_PMM(hose, reg,
277 					     res->start,
278 					     res->start - offset,
279 					     resource_size(res),
280 					     res->flags,
281 					     j) == 0) {
282 			j++;
283 
284 			/* If the resource PCI address is 0 then we have our
285 			 * ISA memory hole
286 			 */
287 			if (res->start == offset)
288 				found_isa_hole = 1;
289 		}
290 	}
291 
292 	/* Handle ISA memory hole if not already covered */
293 	if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
294 		if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
295 					     hose->isa_mem_size, 0, j) == 0)
296 			printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
297 			       hose->dn->full_name);
298 }
299 
ppc4xx_configure_pci_PTMs(struct pci_controller * hose,void __iomem * reg,const struct resource * res)300 static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
301 					     void __iomem *reg,
302 					     const struct resource *res)
303 {
304 	resource_size_t size = resource_size(res);
305 	u32 sa;
306 
307 	/* Calculate window size */
308 	sa = (0xffffffffu << ilog2(size)) | 1;
309 	sa |= 0x1;
310 
311 	/* RAM is always at 0 local for now */
312 	writel(0, reg + PCIL0_PTM1LA);
313 	writel(sa, reg + PCIL0_PTM1MS);
314 
315 	/* Map on PCI side */
316 	early_write_config_dword(hose, hose->first_busno, 0,
317 				 PCI_BASE_ADDRESS_1, res->start);
318 	early_write_config_dword(hose, hose->first_busno, 0,
319 				 PCI_BASE_ADDRESS_2, 0x00000000);
320 	early_write_config_word(hose, hose->first_busno, 0,
321 				PCI_COMMAND, 0x0006);
322 }
323 
/* Probe and initialize one 4xx PCI 2.x host bridge from its device-tree
 * node: map its internal registers, set up indirect config space access,
 * disable all windows, then program outbound (PMM) and inbound (PTM)
 * mappings.  On any failure the controller and mapping are torn down. */
static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
{
	struct resource rsrc_cfg;
	struct resource rsrc_reg;
	struct resource dma_window;
	struct pci_controller *hose = NULL;
	void __iomem *reg = NULL;
	const int *bus_range;
	int primary = 0;

	/* Check if device is enabled */
	if (!of_device_is_available(np)) {
		printk(KERN_INFO "%s: Port disabled via device-tree\n",
		       np->full_name);
		return;
	}

	/* Fetch config space registers address */
	if (of_address_to_resource(np, 0, &rsrc_cfg)) {
		printk(KERN_ERR "%s: Can't get PCI config register base !",
		       np->full_name);
		return;
	}
	/* Fetch host bridge internal registers address */
	if (of_address_to_resource(np, 3, &rsrc_reg)) {
		printk(KERN_ERR "%s: Can't get PCI internal register base !",
		       np->full_name);
		return;
	}

	/* Check if primary bridge */
	if (of_get_property(np, "primary", NULL))
		primary = 1;

	/* Get bus range if any */
	bus_range = of_get_property(np, "bus-range", NULL);

	/* Map registers */
	reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
	if (reg == NULL) {
		printk(KERN_ERR "%s: Can't map registers !", np->full_name);
		goto fail;
	}

	/* Allocate the host controller data structure */
	hose = pcibios_alloc_controller(np);
	if (!hose)
		goto fail;

	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	/* Setup config space: address register at cfg base, data register
	 * 4 bytes after it */
	setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);

	/* Disable all windows */
	writel(0, reg + PCIL0_PMM0MA);
	writel(0, reg + PCIL0_PMM1MA);
	writel(0, reg + PCIL0_PMM2MA);
	writel(0, reg + PCIL0_PTM1MS);
	writel(0, reg + PCIL0_PTM2MS);

	/* Parse outbound mapping resources */
	pci_process_bridge_OF_ranges(hose, np, primary);

	/* Parse inbound mapping resources */
	if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
		goto fail;

	/* Configure outbound ranges POMs */
	ppc4xx_configure_pci_PMMs(hose, reg);

	/* Configure inbound ranges PIMs */
	ppc4xx_configure_pci_PTMs(hose, reg, &dma_window);

	/* We don't need the registers anymore */
	iounmap(reg);
	return;

 fail:
	if (hose)
		pcibios_free_controller(hose);
	if (reg)
		iounmap(reg);
}
410 
411 /*
412  * 4xx PCI-X part
413  */
414 
ppc4xx_setup_one_pcix_POM(struct pci_controller * hose,void __iomem * reg,u64 plb_addr,u64 pci_addr,u64 size,unsigned int flags,int index)415 static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller	*hose,
416 					    void __iomem		*reg,
417 					    u64				plb_addr,
418 					    u64				pci_addr,
419 					    u64				size,
420 					    unsigned int		flags,
421 					    int				index)
422 {
423 	u32 lah, lal, pciah, pcial, sa;
424 
425 	if (!is_power_of_2(size) || size < 0x1000 ||
426 	    (plb_addr & (size - 1)) != 0) {
427 		printk(KERN_WARNING "%s: Resource out of range\n",
428 		       hose->dn->full_name);
429 		return -1;
430 	}
431 
432 	/* Calculate register values */
433 	lah = RES_TO_U32_HIGH(plb_addr);
434 	lal = RES_TO_U32_LOW(plb_addr);
435 	pciah = RES_TO_U32_HIGH(pci_addr);
436 	pcial = RES_TO_U32_LOW(pci_addr);
437 	sa = (0xffffffffu << ilog2(size)) | 0x1;
438 
439 	/* Program register values */
440 	if (index == 0) {
441 		writel(lah, reg + PCIX0_POM0LAH);
442 		writel(lal, reg + PCIX0_POM0LAL);
443 		writel(pciah, reg + PCIX0_POM0PCIAH);
444 		writel(pcial, reg + PCIX0_POM0PCIAL);
445 		writel(sa, reg + PCIX0_POM0SA);
446 	} else {
447 		writel(lah, reg + PCIX0_POM1LAH);
448 		writel(lal, reg + PCIX0_POM1LAL);
449 		writel(pciah, reg + PCIX0_POM1PCIAH);
450 		writel(pcial, reg + PCIX0_POM1PCIAL);
451 		writel(sa, reg + PCIX0_POM1SA);
452 	}
453 
454 	return 0;
455 }
456 
ppc4xx_configure_pcix_POMs(struct pci_controller * hose,void __iomem * reg)457 static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
458 					      void __iomem *reg)
459 {
460 	int i, j, found_isa_hole = 0;
461 
462 	/* Setup outbound memory windows */
463 	for (i = j = 0; i < 3; i++) {
464 		struct resource *res = &hose->mem_resources[i];
465 		resource_size_t offset = hose->mem_offset[i];
466 
467 		/* we only care about memory windows */
468 		if (!(res->flags & IORESOURCE_MEM))
469 			continue;
470 		if (j > 1) {
471 			printk(KERN_WARNING "%s: Too many ranges\n",
472 			       hose->dn->full_name);
473 			break;
474 		}
475 
476 		/* Configure the resource */
477 		if (ppc4xx_setup_one_pcix_POM(hose, reg,
478 					      res->start,
479 					      res->start - offset,
480 					      resource_size(res),
481 					      res->flags,
482 					      j) == 0) {
483 			j++;
484 
485 			/* If the resource PCI address is 0 then we have our
486 			 * ISA memory hole
487 			 */
488 			if (res->start == offset)
489 				found_isa_hole = 1;
490 		}
491 	}
492 
493 	/* Handle ISA memory hole if not already covered */
494 	if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
495 		if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
496 					      hose->isa_mem_size, 0, j) == 0)
497 			printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
498 			       hose->dn->full_name);
499 }
500 
ppc4xx_configure_pcix_PIMs(struct pci_controller * hose,void __iomem * reg,const struct resource * res,int big_pim,int enable_msi_hole)501 static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
502 					      void __iomem *reg,
503 					      const struct resource *res,
504 					      int big_pim,
505 					      int enable_msi_hole)
506 {
507 	resource_size_t size = resource_size(res);
508 	u32 sa;
509 
510 	/* RAM is always at 0 */
511 	writel(0x00000000, reg + PCIX0_PIM0LAH);
512 	writel(0x00000000, reg + PCIX0_PIM0LAL);
513 
514 	/* Calculate window size */
515 	sa = (0xffffffffu << ilog2(size)) | 1;
516 	sa |= 0x1;
517 	if (res->flags & IORESOURCE_PREFETCH)
518 		sa |= 0x2;
519 	if (enable_msi_hole)
520 		sa |= 0x4;
521 	writel(sa, reg + PCIX0_PIM0SA);
522 	if (big_pim)
523 		writel(0xffffffff, reg + PCIX0_PIM0SAH);
524 
525 	/* Map on PCI side */
526 	writel(0x00000000, reg + PCIX0_BAR0H);
527 	writel(res->start, reg + PCIX0_BAR0L);
528 	writew(0x0006, reg + PCIX0_COMMAND);
529 }
530 
ppc4xx_probe_pcix_bridge(struct device_node * np)531 static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
532 {
533 	struct resource rsrc_cfg;
534 	struct resource rsrc_reg;
535 	struct resource dma_window;
536 	struct pci_controller *hose = NULL;
537 	void __iomem *reg = NULL;
538 	const int *bus_range;
539 	int big_pim = 0, msi = 0, primary = 0;
540 
541 	/* Fetch config space registers address */
542 	if (of_address_to_resource(np, 0, &rsrc_cfg)) {
543 		printk(KERN_ERR "%s:Can't get PCI-X config register base !",
544 		       np->full_name);
545 		return;
546 	}
547 	/* Fetch host bridge internal registers address */
548 	if (of_address_to_resource(np, 3, &rsrc_reg)) {
549 		printk(KERN_ERR "%s: Can't get PCI-X internal register base !",
550 		       np->full_name);
551 		return;
552 	}
553 
554 	/* Check if it supports large PIMs (440GX) */
555 	if (of_get_property(np, "large-inbound-windows", NULL))
556 		big_pim = 1;
557 
558 	/* Check if we should enable MSIs inbound hole */
559 	if (of_get_property(np, "enable-msi-hole", NULL))
560 		msi = 1;
561 
562 	/* Check if primary bridge */
563 	if (of_get_property(np, "primary", NULL))
564 		primary = 1;
565 
566 	/* Get bus range if any */
567 	bus_range = of_get_property(np, "bus-range", NULL);
568 
569 	/* Map registers */
570 	reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
571 	if (reg == NULL) {
572 		printk(KERN_ERR "%s: Can't map registers !", np->full_name);
573 		goto fail;
574 	}
575 
576 	/* Allocate the host controller data structure */
577 	hose = pcibios_alloc_controller(np);
578 	if (!hose)
579 		goto fail;
580 
581 	hose->first_busno = bus_range ? bus_range[0] : 0x0;
582 	hose->last_busno = bus_range ? bus_range[1] : 0xff;
583 
584 	/* Setup config space */
585 	setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4,
586 					PPC_INDIRECT_TYPE_SET_CFG_TYPE);
587 
588 	/* Disable all windows */
589 	writel(0, reg + PCIX0_POM0SA);
590 	writel(0, reg + PCIX0_POM1SA);
591 	writel(0, reg + PCIX0_POM2SA);
592 	writel(0, reg + PCIX0_PIM0SA);
593 	writel(0, reg + PCIX0_PIM1SA);
594 	writel(0, reg + PCIX0_PIM2SA);
595 	if (big_pim) {
596 		writel(0, reg + PCIX0_PIM0SAH);
597 		writel(0, reg + PCIX0_PIM2SAH);
598 	}
599 
600 	/* Parse outbound mapping resources */
601 	pci_process_bridge_OF_ranges(hose, np, primary);
602 
603 	/* Parse inbound mapping resources */
604 	if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
605 		goto fail;
606 
607 	/* Configure outbound ranges POMs */
608 	ppc4xx_configure_pcix_POMs(hose, reg);
609 
610 	/* Configure inbound ranges PIMs */
611 	ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi);
612 
613 	/* We don't need the registers anymore */
614 	iounmap(reg);
615 	return;
616 
617  fail:
618 	if (hose)
619 		pcibios_free_controller(hose);
620 	if (reg)
621 		iounmap(reg);
622 }
623 
624 #ifdef CONFIG_PPC4xx_PCI_EXPRESS
625 
626 /*
627  * 4xx PCI-Express part
628  *
629  * We support 3 parts currently based on the compatible property:
630  *
631  * ibm,plb-pciex-440spe
632  * ibm,plb-pciex-405ex
633  * ibm,plb-pciex-460ex
634  *
635  * Anything else will be rejected for now as they are all subtly
636  * different unfortunately.
637  *
638  */
639 
#define MAX_PCIE_BUS_MAPPED	0x40

/* Per-port state for one PCIe cell */
struct ppc4xx_pciex_port
{
	struct pci_controller	*hose;		/* associated PHB */
	struct device_node	*node;		/* port device-tree node */
	unsigned int		index;		/* port number */
	int			endpoint;	/* configured as endpoint ? */
	int			link;		/* link detected up ? */
	int			has_ibpre;	/* inband presence detect */
	unsigned int		sdr_base;	/* SDR register base */
	dcr_host_t		dcrs;		/* DCR mapping */
	struct resource		cfg_space;	/* config space */
	struct resource		utl_regs;	/* UTL register space */
	void __iomem		*utl_base;	/* mapped UTL registers */
};

static struct ppc4xx_pciex_port *ppc4xx_pciex_ports;
static unsigned int ppc4xx_pciex_port_count;

/* SoC-specific hooks for the various PCIe cell flavours */
struct ppc4xx_pciex_hwops
{
	bool want_sdr;			/* core uses SDR for status/reset */
	int (*core_init)(struct device_node *np);	/* returns port count */
	int (*port_init_hw)(struct ppc4xx_pciex_port *port);
	int (*setup_utl)(struct ppc4xx_pciex_port *port);
	void (*check_link)(struct ppc4xx_pciex_port *port);
};

static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
670 
ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port * port,unsigned int sdr_offset,unsigned int mask,unsigned int value,int timeout_ms)671 static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
672 					   unsigned int sdr_offset,
673 					   unsigned int mask,
674 					   unsigned int value,
675 					   int timeout_ms)
676 {
677 	u32 val;
678 
679 	while(timeout_ms--) {
680 		val = mfdcri(SDR0, port->sdr_base + sdr_offset);
681 		if ((val & mask) == value) {
682 			pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
683 				 port->index, sdr_offset, timeout_ms, val);
684 			return 0;
685 		}
686 		msleep(1);
687 	}
688 	return -1;
689 }
690 
ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port * port)691 static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port)
692 {
693 	/* Wait for reset to complete */
694 	if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
695 		printk(KERN_WARNING "PCIE%d: PGRST failed\n",
696 		       port->index);
697 		return -1;
698 	}
699 	return 0;
700 }
701 
702 
ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port * port)703 static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
704 {
705 	printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
706 
707 	/* Check for card presence detect if supported, if not, just wait for
708 	 * link unconditionally.
709 	 *
710 	 * note that we don't fail if there is no link, we just filter out
711 	 * config space accesses. That way, it will be easier to implement
712 	 * hotplug later on.
713 	 */
714 	if (!port->has_ibpre ||
715 	    !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
716 				      1 << 28, 1 << 28, 100)) {
717 		printk(KERN_INFO
718 		       "PCIE%d: Device detected, waiting for link...\n",
719 		       port->index);
720 		if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
721 					     0x1000, 0x1000, 2000))
722 			printk(KERN_WARNING
723 			       "PCIE%d: Link up failed\n", port->index);
724 		else {
725 			printk(KERN_INFO
726 			       "PCIE%d: link is up !\n", port->index);
727 			port->link = 1;
728 		}
729 	} else
730 		printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
731 }
732 
733 #ifdef CONFIG_44x
734 
735 /* Check various reset bits of the 440SPe PCIe core */
ppc440spe_pciex_check_reset(struct device_node * np)736 static int __init ppc440spe_pciex_check_reset(struct device_node *np)
737 {
738 	u32 valPE0, valPE1, valPE2;
739 	int err = 0;
740 
741 	/* SDR0_PEGPLLLCT1 reset */
742 	if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) {
743 		/*
744 		 * the PCIe core was probably already initialised
745 		 * by firmware - let's re-reset RCSSET regs
746 		 *
747 		 * -- Shouldn't we also re-reset the whole thing ? -- BenH
748 		 */
749 		pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n");
750 		mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000);
751 		mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000);
752 		mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000);
753 	}
754 
755 	valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET);
756 	valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET);
757 	valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET);
758 
759 	/* SDR0_PExRCSSET rstgu */
760 	if (!(valPE0 & 0x01000000) ||
761 	    !(valPE1 & 0x01000000) ||
762 	    !(valPE2 & 0x01000000)) {
763 		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n");
764 		err = -1;
765 	}
766 
767 	/* SDR0_PExRCSSET rstdl */
768 	if (!(valPE0 & 0x00010000) ||
769 	    !(valPE1 & 0x00010000) ||
770 	    !(valPE2 & 0x00010000)) {
771 		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n");
772 		err = -1;
773 	}
774 
775 	/* SDR0_PExRCSSET rstpyn */
776 	if ((valPE0 & 0x00001000) ||
777 	    (valPE1 & 0x00001000) ||
778 	    (valPE2 & 0x00001000)) {
779 		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n");
780 		err = -1;
781 	}
782 
783 	/* SDR0_PExRCSSET hldplb */
784 	if ((valPE0 & 0x10000000) ||
785 	    (valPE1 & 0x10000000) ||
786 	    (valPE2 & 0x10000000)) {
787 		printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n");
788 		err = -1;
789 	}
790 
791 	/* SDR0_PExRCSSET rdy */
792 	if ((valPE0 & 0x00100000) ||
793 	    (valPE1 & 0x00100000) ||
794 	    (valPE2 & 0x00100000)) {
795 		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n");
796 		err = -1;
797 	}
798 
799 	/* SDR0_PExRCSSET shutdown */
800 	if ((valPE0 & 0x00000100) ||
801 	    (valPE1 & 0x00000100) ||
802 	    (valPE2 & 0x00000100)) {
803 		printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n");
804 		err = -1;
805 	}
806 
807 	return err;
808 }
809 
810 /* Global PCIe core initializations for 440SPe core */
/* Global PCIe core initializations for 440SPe core.  Returns the number
 * of ports (3) on success, a negative value on failure. */
static int __init ppc440spe_pciex_core_init(struct device_node *np)
{
	int time_out = 20;	/* PLL lock timeout, in ~1us steps */

	/* Set PLL clock receiver to LVPECL */
	dcri_clrset(SDR0, PESDR0_PLLLCT1, 0, 1 << 28);

	/* Shouldn't we do all the calibration stuff etc... here ? */
	if (ppc440spe_pciex_check_reset(np))
		return -ENXIO;

	if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) {
		printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration "
		       "failed (0x%08x)\n",
		       mfdcri(SDR0, PESDR0_PLLLCT2));
		return -1;
	}

	/* De-assert reset of PCIe PLL, wait for lock */
	dcri_clrset(SDR0, PESDR0_PLLLCT1, 1 << 24, 0);
	udelay(3);

	/* Poll PLLLCT3 for VCO lock */
	while (time_out) {
		if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) {
			time_out--;
			udelay(1);
		} else
			break;
	}
	if (!time_out) {
		printk(KERN_INFO "PCIE: VCO output not locked\n");
		return -1;
	}

	pr_debug("PCIE initialization OK\n");

	/* The 440SPe core provides 3 PCIe ports */
	return 3;
}
849 
ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port * port)850 static int __init ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
851 {
852 	u32 val = 1 << 24;
853 
854 	if (port->endpoint)
855 		val = PTYPE_LEGACY_ENDPOINT << 20;
856 	else
857 		val = PTYPE_ROOT_PORT << 20;
858 
859 	if (port->index == 0)
860 		val |= LNKW_X8 << 12;
861 	else
862 		val |= LNKW_X4 << 12;
863 
864 	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
865 	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222);
866 	if (ppc440spe_revA())
867 		mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000);
868 	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000);
869 	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000);
870 	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000);
871 	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000);
872 	if (port->index == 0) {
873 		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1,
874 		       0x35000000);
875 		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1,
876 		       0x35000000);
877 		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1,
878 		       0x35000000);
879 		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1,
880 		       0x35000000);
881 	}
882 	dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
883 			(1 << 24) | (1 << 16), 1 << 12);
884 
885 	return ppc4xx_pciex_port_reset_sdr(port);
886 }
887 
/* Rev. A 440SPe port init: base sequence only (no inband presence
 * detect on this revision). */
static int __init ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	return ppc440spe_pciex_init_port_hw(port);
}
892 
/* Rev. B 440SPe port init: base sequence plus inband presence detect. */
static int __init ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	int rc = ppc440spe_pciex_init_port_hw(port);

	/* Rev. B supports inband presence detect (used by check_link) */
	port->has_ibpre = 1;

	return rc;
}
901 
/* UTL (upper transaction layer) setup for rev. A 440SPe ports. */
static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	/* XXX Check what that value means... I hate magic */
	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800);

	/*
	 * Set buffer allocations and then assert VRB and TXE.
	 */
	out_be32(port->utl_base + PEUTL_OUTTR,   0x08000000);
	out_be32(port->utl_base + PEUTL_INTR,    0x02000000);
	out_be32(port->utl_base + PEUTL_OPDBSZ,  0x10000000);
	out_be32(port->utl_base + PEUTL_PBBSZ,   0x53000000);
	out_be32(port->utl_base + PEUTL_IPHBSZ,  0x08000000);
	out_be32(port->utl_base + PEUTL_IPDBSZ,  0x10000000);
	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
	out_be32(port->utl_base + PEUTL_PCTL,    0x80800066);

	return 0;
}
921 
/* UTL setup for rev. B 440SPe ports. */
static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	/* Report CRS to the operating system */
	out_be32(port->utl_base + PEUTL_PBCTL,    0x08000000);

	return 0;
}
929 
/* Hardware ops for rev. A 440SPe (no inband presence detect) */
static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
{
	.want_sdr	= true,
	.core_init	= ppc440spe_pciex_core_init,
	.port_init_hw	= ppc440speA_pciex_init_port_hw,
	.setup_utl	= ppc440speA_pciex_init_utl,
	.check_link	= ppc4xx_pciex_check_link_sdr,
};

/* Hardware ops for rev. B 440SPe (adds inband presence detect) */
static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
{
	.want_sdr	= true,
	.core_init	= ppc440spe_pciex_core_init,
	.port_init_hw	= ppc440speB_pciex_init_port_hw,
	.setup_utl	= ppc440speB_pciex_init_utl,
	.check_link	= ppc4xx_pciex_check_link_sdr,
};
947 
/* Global PCIe core init for 460EX: no core-level setup is required. */
static int __init ppc460ex_pciex_core_init(struct device_node *np)
{
	/* Nothing to do, return 2 ports */
	return 2;
}
953 
/* Per-port hardware init for the 460EX PCIe cells: program port type
 * (root/endpoint) and link width, per-lane PHY settings, then cycle
 * the port resets.  Returns 0 on success, -1 if the reset does not
 * complete. */
static int __init ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	u32 val;
	u32 utlset1;

	if (port->endpoint)
		val = PTYPE_LEGACY_ENDPOINT << 20;
	else
		val = PTYPE_ROOT_PORT << 20;

	/* Port 0 is x1, port 1 is x4; each has its own UTLSET1 value */
	if (port->index == 0) {
		val |= LNKW_X1 << 12;
		utlset1 = 0x20000000;
	} else {
		val |= LNKW_X4 << 12;
		utlset1 = 0x20101101;
	}

	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, utlset1);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01210000);

	/* Per-lane PHY settings (1 lane on port 0, 4 on port 1), then
	 * kick the PHY reset */
	switch (port->index) {
	case 0:
		mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
		mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);

		mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST,0x10000000);
		break;

	case 1:
		mtdcri(SDR0, PESDR1_460EX_L0CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L1CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L2CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L3CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L0DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L1DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L2DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L3DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L0CLK, 0x00000006);
		mtdcri(SDR0, PESDR1_460EX_L1CLK, 0x00000006);
		mtdcri(SDR0, PESDR1_460EX_L2CLK, 0x00000006);
		mtdcri(SDR0, PESDR1_460EX_L3CLK, 0x00000006);

		mtdcri(SDR0, PESDR1_460EX_PHY_CTL_RST,0x10000000);
		break;
	}

	/* Assert RSTGU and RSTPYN in RCSSET */
	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
	       mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
	       (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));

	/* Poll for PHY reset */
	/* XXX FIXME add timeout */
	switch (port->index) {
	case 0:
		while (!(mfdcri(SDR0, PESDR0_460EX_RSTSTA) & 0x1))
			udelay(10);
		break;
	case 1:
		while (!(mfdcri(SDR0, PESDR1_460EX_RSTSTA) & 0x1))
			udelay(10);
		break;
	}

	/* Clear RSTGU/RSTDL, keep RSTPYN asserted */
	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
	       (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
		~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
	       PESDRx_RCSSET_RSTPYN);

	/* 460EX supports inband presence detect */
	port->has_ibpre = 1;

	return ppc4xx_pciex_port_reset_sdr(port);
}
1029 
/*
 * UTL setup for 460EX ports: clears the GPL special register, then
 * programs UTL buffer sizing registers and finally asserts VRB and TXE
 * via PEUTL_PCTL.
 */
static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);

	/*
	 * Set buffer allocations and then assert VRB and TXE.
	 */
	out_be32(port->utl_base + PEUTL_PBCTL,	0x0800000c);
	out_be32(port->utl_base + PEUTL_OUTTR,	0x08000000);
	out_be32(port->utl_base + PEUTL_INTR,	0x02000000);
	out_be32(port->utl_base + PEUTL_OPDBSZ,	0x04000000);
	out_be32(port->utl_base + PEUTL_PBBSZ,	0x00000000);
	out_be32(port->utl_base + PEUTL_IPHBSZ,	0x02000000);
	out_be32(port->utl_base + PEUTL_IPDBSZ,	0x04000000);
	out_be32(port->utl_base + PEUTL_RCIRQEN,0x00f00000);
	out_be32(port->utl_base + PEUTL_PCTL,	0x80800066);

	return 0;
}
1049 
/* Hardware ops for the 460EX/460GT PCIe cores */
static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
{
	.want_sdr	= true,
	.core_init	= ppc460ex_pciex_core_init,
	.port_init_hw	= ppc460ex_pciex_init_port_hw,
	.setup_utl	= ppc460ex_pciex_init_utl,
	.check_link	= ppc4xx_pciex_check_link_sdr,
};
1058 
/* APM821xx core init: no global setup needed, single PCIe port */
static int __init apm821xx_pciex_core_init(struct device_node *np)
{
	/* Return the number of pcie port */
	return 1;
}
1064 
/*
 * APM821xx PCIe port hardware init: software-resets the PHY, programs
 * port type (root port vs legacy endpoint), x1 link width, UTL setup
 * registers and the lane 0 SerDes registers, then sequences the PHY out
 * of reset with a bounded (100ms) status poll.
 *
 * Returns 0 on success or -EBUSY if the PHY never leaves reset.
 */
static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	u32 val;

	/*
	 * Do a software reset on PCIe ports.
	 * This code is to fix the issue that pci drivers doesn't re-assign
	 * bus number for PCIE devices after Uboot
	 * scanned and configured all the buses (eg. PCIE NIC IntelPro/1000
	 * PT quad port, SAS LSI 1064E)
	 */

	mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x0);
	mdelay(10);

	if (port->endpoint)
		val = PTYPE_LEGACY_ENDPOINT << 20;
	else
		val = PTYPE_ROOT_PORT << 20;

	/* Single lane on this part */
	val |= LNKW_X1 << 12;

	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);

	/* Lane 0 CDR control, drive and clock setup */
	mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
	mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
	mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);

	/* Two-step PHY reset release, with a settle delay in between */
	mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000);
	mdelay(50);
	mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x30000000);

	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
		mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
		(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));

	/* Poll for PHY reset */
	val = PESDR0_460EX_RSTSTA - port->sdr_base;
	if (ppc4xx_pciex_wait_on_sdr(port, val, 0x1, 1,	100)) {
		printk(KERN_WARNING "%s: PCIE: Can't reset PHY\n", __func__);
		return -EBUSY;
	} else {
		/* Clear RSTGU/RSTDL, keep RSTPYN set */
		mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
			(mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
			~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
			PESDRx_RCSSET_RSTPYN);

		port->has_ibpre = 1;
		return 0;
	}
}
1118 
/* Hardware ops for the APM821xx PCIe core (shares UTL setup with 460EX) */
static struct ppc4xx_pciex_hwops apm821xx_pcie_hwops __initdata = {
	.want_sdr   = true,
	.core_init	= apm821xx_pciex_core_init,
	.port_init_hw	= apm821xx_pciex_init_port_hw,
	.setup_utl	= ppc460ex_pciex_init_utl,
	.check_link = ppc4xx_pciex_check_link_sdr,
};
1126 
/*
 * One-time SerDes/HSS setup for the 460SX PCIe core: programs drive
 * amplitude, TX pre-emphasis, TX calibration, slew control and PRBS for
 * all lanes of the three cores, de-asserts PLLRESET, then resets the
 * DL/UTL/GPL blocks before port configuration.
 *
 * Returns the number of ports present: 3 when u-boot enabled PCIe
 * bifurcation (bit 0 of PESDR1 HSSCTLSET), otherwise 2.
 */
static int __init ppc460sx_pciex_core_init(struct device_node *np)
{
	/* HSS drive amplitude */
	mtdcri(SDR0, PESDR0_460SX_HSSL0DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR0_460SX_HSSL1DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR0_460SX_HSSL2DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR0_460SX_HSSL3DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR0_460SX_HSSL4DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR0_460SX_HSSL5DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR0_460SX_HSSL6DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR0_460SX_HSSL7DAMP, 0xB9843211);

	mtdcri(SDR0, PESDR1_460SX_HSSL0DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR1_460SX_HSSL1DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR1_460SX_HSSL2DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR1_460SX_HSSL3DAMP, 0xB9843211);

	mtdcri(SDR0, PESDR2_460SX_HSSL0DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR2_460SX_HSSL1DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR2_460SX_HSSL2DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR2_460SX_HSSL3DAMP, 0xB9843211);

	/* HSS TX pre-emphasis */
	mtdcri(SDR0, PESDR0_460SX_HSSL0COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR0_460SX_HSSL1COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR0_460SX_HSSL2COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR0_460SX_HSSL3COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR0_460SX_HSSL4COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR0_460SX_HSSL5COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR0_460SX_HSSL6COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR0_460SX_HSSL7COEFA, 0xDCB98987);

	mtdcri(SDR0, PESDR1_460SX_HSSL0COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR1_460SX_HSSL1COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR1_460SX_HSSL2COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR1_460SX_HSSL3COEFA, 0xDCB98987);

	mtdcri(SDR0, PESDR2_460SX_HSSL0COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR2_460SX_HSSL1COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR2_460SX_HSSL2COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR2_460SX_HSSL3COEFA, 0xDCB98987);

	/* HSS TX calibration control */
	mtdcri(SDR0, PESDR0_460SX_HSSL1CALDRV, 0x22222222);
	mtdcri(SDR0, PESDR1_460SX_HSSL1CALDRV, 0x22220000);
	mtdcri(SDR0, PESDR2_460SX_HSSL1CALDRV, 0x22220000);

	/* HSS TX slew control */
	mtdcri(SDR0, PESDR0_460SX_HSSSLEW, 0xFFFFFFFF);
	mtdcri(SDR0, PESDR1_460SX_HSSSLEW, 0xFFFF0000);
	mtdcri(SDR0, PESDR2_460SX_HSSSLEW, 0xFFFF0000);

	/* Set HSS PRBS enabled */
	mtdcri(SDR0, PESDR0_460SX_HSSCTLSET, 0x00001130);
	mtdcri(SDR0, PESDR2_460SX_HSSCTLSET, 0x00001130);

	udelay(100);

	/* De-assert PLLRESET */
	dcri_clrset(SDR0, PESDR0_PLLLCT2, 0x00000100, 0);

	/* Reset DL, UTL, GPL before configuration */
	mtdcri(SDR0, PESDR0_460SX_RCSSET,
			PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
	mtdcri(SDR0, PESDR1_460SX_RCSSET,
			PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
	mtdcri(SDR0, PESDR2_460SX_RCSSET,
			PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);

	udelay(100);

	/*
	 * If bifurcation is not enabled, u-boot would have disabled the
	 * third PCIe port
	 */
	if (((mfdcri(SDR0, PESDR1_460SX_HSSCTLSET) & 0x00000001) ==
				0x00000001)) {
		printk(KERN_INFO "PCI: PCIE bifurcation setup successfully.\n");
		printk(KERN_INFO "PCI: Total 3 PCIE ports are present\n");
		return 3;
	}

	printk(KERN_INFO "PCI: Total 2 PCIE ports are present\n");
	return 2;
}
1212 
/*
 * 460SX PCIe port hardware init: flips bit 0x01000000 in UTLSET2
 * (set for endpoint mode, cleared for root port), clears the RSTGU and
 * RSTDL resets while setting RSTPYN, then releases the port reset.
 */
static int __init ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{

	if (port->endpoint)
		dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
				0x01000000, 0);
	else
		dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
				0, 0x01000000);

	dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
			(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL),
			PESDRx_RCSSET_RSTPYN);

	port->has_ibpre = 1;

	return ppc4xx_pciex_port_reset_sdr(port);
}
1231 
/*
 * UTL setup for 460SX ports: program the buffer size register and
 * assert VRB/TXE (with address validation off, per the datasheet).
 */
static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	/* Max 128 Bytes */
	out_be32 (port->utl_base + PEUTL_PBBSZ,   0x00000000);
	/* Assert VRB and TXE - per datasheet turn off addr validation */
	out_be32(port->utl_base + PEUTL_PCTL,  0x80800000);
	return 0;
}
1240 
ppc460sx_pciex_check_link(struct ppc4xx_pciex_port * port)1241 static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
1242 {
1243 	void __iomem *mbase;
1244 	int attempt = 50;
1245 
1246 	port->link = 0;
1247 
1248 	mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
1249 	if (mbase == NULL) {
1250 		printk(KERN_ERR "%s: Can't map internal config space !",
1251 			port->node->full_name);
1252 		goto done;
1253 	}
1254 
1255 	while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA)
1256 			& PECFG_460SX_DLLSTA_LINKUP))) {
1257 		attempt--;
1258 		mdelay(10);
1259 	}
1260 	if (attempt)
1261 		port->link = 1;
1262 done:
1263 	iounmap(mbase);
1264 
1265 }
1266 
/* Hardware ops for the 460SX PCIe core (custom link check, no SDR poll) */
static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
	.want_sdr	= true,
	.core_init	= ppc460sx_pciex_core_init,
	.port_init_hw	= ppc460sx_pciex_init_port_hw,
	.setup_utl	= ppc460sx_pciex_init_utl,
	.check_link	= ppc460sx_pciex_check_link,
};
1274 
1275 #endif /* CONFIG_44x */
1276 
1277 #ifdef CONFIG_40x
1278 
/* 405EX core init: no global setup needed, just report the port count */
static int __init ppc405ex_pciex_core_init(struct device_node *np)
{
	/* Nothing to do, return 2 ports */
	return 2;
}
1284 
ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port * port)1285 static void ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
1286 {
1287 	/* Assert the PE0_PHY reset */
1288 	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
1289 	msleep(1);
1290 
1291 	/* deassert the PE0_hotreset */
1292 	if (port->endpoint)
1293 		mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000);
1294 	else
1295 		mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000);
1296 
1297 	/* poll for phy !reset */
1298 	/* XXX FIXME add timeout */
1299 	while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000))
1300 		;
1301 
1302 	/* deassert the PE0_gpl_utl_reset */
1303 	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000);
1304 }
1305 
/*
 * 405EX PCIe port hardware init: programs port type, x1 link width,
 * UTL setup and PHY setup registers, optionally resets the PHY (only
 * when no link is up, see note below), enables guarded access in the
 * GPL config, then releases the port reset.
 */
static int __init ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	u32 val;

	if (port->endpoint)
		val = PTYPE_LEGACY_ENDPOINT;
	else
		val = PTYPE_ROOT_PORT;

	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET,
	       1 << 24 | val << 20 | LNKW_X1 << 12);

	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
	mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000);
	mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003);

	/*
	 * Only reset the PHY when no link is currently established.
	 * This is for the Atheros PCIe board which has problems to establish
	 * the link (again) after this PHY reset. All other currently tested
	 * PCIe boards don't show this problem.
	 * This has to be re-tested and fixed in a later release!
	 */
	val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
	if (!(val & 0x00001000))
		ppc405ex_pcie_phy_reset(port);

	dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000);  /* guarded on */

	port->has_ibpre = 1;

	return ppc4xx_pciex_port_reset_sdr(port);
}
1340 
/*
 * UTL setup for 405EX ports: clears the GPL special register, programs
 * UTL buffer sizing, asserts VRB/TXE via PEUTL_PCTL and finally
 * enables CRS reporting through PEUTL_PBCTL.
 */
static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);

	/*
	 * Set buffer allocations and then assert VRB and TXE.
	 */
	out_be32(port->utl_base + PEUTL_OUTTR,   0x02000000);
	out_be32(port->utl_base + PEUTL_INTR,    0x02000000);
	out_be32(port->utl_base + PEUTL_OPDBSZ,  0x04000000);
	out_be32(port->utl_base + PEUTL_PBBSZ,   0x21000000);
	out_be32(port->utl_base + PEUTL_IPHBSZ,  0x02000000);
	out_be32(port->utl_base + PEUTL_IPDBSZ,  0x04000000);
	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
	out_be32(port->utl_base + PEUTL_PCTL,    0x80800066);

	out_be32(port->utl_base + PEUTL_PBCTL,   0x08000000);

	return 0;
}
1361 
/* Hardware ops for the 405EX PCIe core */
static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
{
	.want_sdr	= true,
	.core_init	= ppc405ex_pciex_core_init,
	.port_init_hw	= ppc405ex_pciex_init_port_hw,
	.setup_utl	= ppc405ex_pciex_init_utl,
	.check_link	= ppc4xx_pciex_check_link_sdr,
};
1370 
1371 #endif /* CONFIG_40x */
1372 
1373 #ifdef CONFIG_476FPE
/* 476FPE core init: no global setup needed, report 4 ports */
static int __init ppc_476fpe_pciex_core_init(struct device_node *np)
{
	return 4;
}
1378 
ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port * port)1379 static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port)
1380 {
1381 	u32 timeout_ms = 20;
1382 	u32 val = 0, mask = (PECFG_TLDLP_LNKUP|PECFG_TLDLP_PRESENT);
1383 	void __iomem *mbase = ioremap(port->cfg_space.start + 0x10000000,
1384 	                              0x1000);
1385 
1386 	printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
1387 
1388 	if (mbase == NULL) {
1389 		printk(KERN_WARNING "PCIE%d: failed to get cfg space\n",
1390 		                    port->index);
1391 		return;
1392 	}
1393 
1394 	while (timeout_ms--) {
1395 		val = in_le32(mbase + PECFG_TLDLP);
1396 
1397 		if ((val & mask) == mask)
1398 			break;
1399 		msleep(10);
1400 	}
1401 
1402 	if (val & PECFG_TLDLP_PRESENT) {
1403 		printk(KERN_INFO "PCIE%d: link is up !\n", port->index);
1404 		port->link = 1;
1405 	} else
1406 		printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index);
1407 
1408 	iounmap(mbase);
1409 	return;
1410 }
1411 
/* Hardware ops for 476FPE/476GTR: no SDR, no port/UTL init needed */
static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata =
{
	.core_init	= ppc_476fpe_pciex_core_init,
	.check_link	= ppc_476fpe_pciex_check_link,
};
1417 #endif /* CONFIG_476FPE */
1418 
1419 /* Check that the core has been initied and if not, do it */
ppc4xx_pciex_check_core_init(struct device_node * np)1420 static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
1421 {
1422 	static int core_init;
1423 	int count = -ENODEV;
1424 
1425 	if (core_init++)
1426 		return 0;
1427 
1428 #ifdef CONFIG_44x
1429 	if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) {
1430 		if (ppc440spe_revA())
1431 			ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops;
1432 		else
1433 			ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops;
1434 	}
1435 	if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
1436 		ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
1437 	if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
1438 		ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
1439 	if (of_device_is_compatible(np, "ibm,plb-pciex-apm821xx"))
1440 		ppc4xx_pciex_hwops = &apm821xx_pcie_hwops;
1441 #endif /* CONFIG_44x    */
1442 #ifdef CONFIG_40x
1443 	if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
1444 		ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
1445 #endif
1446 #ifdef CONFIG_476FPE
1447 	if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe")
1448 		|| of_device_is_compatible(np, "ibm,plb-pciex-476gtr"))
1449 		ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops;
1450 #endif
1451 	if (ppc4xx_pciex_hwops == NULL) {
1452 		printk(KERN_WARNING "PCIE: unknown host type %s\n",
1453 		       np->full_name);
1454 		return -ENODEV;
1455 	}
1456 
1457 	count = ppc4xx_pciex_hwops->core_init(np);
1458 	if (count > 0) {
1459 		ppc4xx_pciex_ports =
1460 		       kzalloc(count * sizeof(struct ppc4xx_pciex_port),
1461 			       GFP_KERNEL);
1462 		if (ppc4xx_pciex_ports) {
1463 			ppc4xx_pciex_port_count = count;
1464 			return 0;
1465 		}
1466 		printk(KERN_WARNING "PCIE: failed to allocate ports array\n");
1467 		return -ENOMEM;
1468 	}
1469 	return -ENODEV;
1470 }
1471 
/*
 * Program the GPL address decode for a port: map the config space and
 * UTL register windows from the device-tree resources, and disable all
 * other outbound windows (they get set up later from the ranges).
 */
static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port)
{
	/* We map PCI Express configuration based on the reg property */
	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
		  RES_TO_U32_HIGH(port->cfg_space.start));
	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
		  RES_TO_U32_LOW(port->cfg_space.start));

	/* XXX FIXME: Use size from reg property. For now, map 512M */
	dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001);

	/* We map UTL registers based on the reg property */
	dcr_write(port->dcrs, DCRO_PEGPL_REGBAH,
		  RES_TO_U32_HIGH(port->utl_regs.start));
	dcr_write(port->dcrs, DCRO_PEGPL_REGBAL,
		  RES_TO_U32_LOW(port->utl_regs.start));

	/* XXX FIXME: Use size from reg property */
	dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001);

	/* Disable all other outbound windows */
	dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0);
	dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0);
	dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0);
	dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
}
1498 
/*
 * Full init for one PCIe port: run the SoC-specific hardware init,
 * program the GPL mappings, check the link, map and program the UTL
 * registers, then wait for VC0 active (or PLL lock on 460SX) before
 * asserting RDY. Returns 0 or the hardware init's error code.
 */
static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
{
	int rc = 0;

	/* Init HW */
	if (ppc4xx_pciex_hwops->port_init_hw)
		rc = ppc4xx_pciex_hwops->port_init_hw(port);
	if (rc != 0)
		return rc;

	/*
	 * Initialize mapping: disable all regions and configure
	 * CFG and REG regions based on resources in the device tree
	 */
	ppc4xx_pciex_port_init_mapping(port);

	if (ppc4xx_pciex_hwops->check_link)
		ppc4xx_pciex_hwops->check_link(port);

	/*
	 * Map UTL
	 */
	port->utl_base = ioremap(port->utl_regs.start, 0x100);
	BUG_ON(port->utl_base == NULL);

	/*
	 * Setup UTL registers --BenH.
	 */
	if (ppc4xx_pciex_hwops->setup_utl)
		ppc4xx_pciex_hwops->setup_utl(port);

	/*
	 * Check for VC0 active or PLL Locked and assert RDY.
	 */
	if (port->sdr_base) {
		if (of_device_is_compatible(port->node,
				"ibm,plb-pciex-460sx")){
			/* 460SX: RCSSTS bit 12 = PLL locked */
			if (port->link && ppc4xx_pciex_wait_on_sdr(port,
					PESDRn_RCSSTS,
					1 << 12, 1 << 12, 5000)) {
				printk(KERN_INFO "PCIE%d: PLL not locked\n",
						port->index);
				port->link = 0;
			}
		} else if (port->link &&
			ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
				1 << 16, 1 << 16, 5000)) {
			/* Others: RCSSTS bit 16 = VC0 active */
			printk(KERN_INFO "PCIE%d: VC0 not active\n",
					port->index);
			port->link = 0;
		}

		/* Assert RDY (bit 20 of RCSSET) */
		dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
	}

	msleep(100);

	return 0;
}
1558 
ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port * port,struct pci_bus * bus,unsigned int devfn)1559 static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port,
1560 				     struct pci_bus *bus,
1561 				     unsigned int devfn)
1562 {
1563 	static int message;
1564 
1565 	/* Endpoint can not generate upstream(remote) config cycles */
1566 	if (port->endpoint && bus->number != port->hose->first_busno)
1567 		return PCIBIOS_DEVICE_NOT_FOUND;
1568 
1569 	/* Check we are within the mapped range */
1570 	if (bus->number > port->hose->last_busno) {
1571 		if (!message) {
1572 			printk(KERN_WARNING "Warning! Probing bus %u"
1573 			       " out of range !\n", bus->number);
1574 			message++;
1575 		}
1576 		return PCIBIOS_DEVICE_NOT_FOUND;
1577 	}
1578 
1579 	/* The root complex has only one device / function */
1580 	if (bus->number == port->hose->first_busno && devfn != 0)
1581 		return PCIBIOS_DEVICE_NOT_FOUND;
1582 
1583 	/* The other side of the RC has only one device as well */
1584 	if (bus->number == (port->hose->first_busno + 1) &&
1585 	    PCI_SLOT(devfn) != 0)
1586 		return PCIBIOS_DEVICE_NOT_FOUND;
1587 
1588 	/* Check if we have a link */
1589 	if ((bus->number != port->hose->first_busno) && !port->link)
1590 		return PCIBIOS_DEVICE_NOT_FOUND;
1591 
1592 	return 0;
1593 }
1594 
ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port * port,struct pci_bus * bus,unsigned int devfn)1595 static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port,
1596 						  struct pci_bus *bus,
1597 						  unsigned int devfn)
1598 {
1599 	int relbus;
1600 
1601 	/* Remove the casts when we finally remove the stupid volatile
1602 	 * in struct pci_controller
1603 	 */
1604 	if (bus->number == port->hose->first_busno)
1605 		return (void __iomem *)port->hose->cfg_addr;
1606 
1607 	relbus = bus->number - (port->hose->first_busno + 1);
1608 	return (void __iomem *)port->hose->cfg_data +
1609 		((relbus  << 20) | (devfn << 12));
1610 }
1611 
/*
 * pci_ops read hook: performs a config-space read with machine-check
 * exceptions suppressed (reads from absent devices cause transaction
 * errors), and synthesizes the 0xffff0001 CRS value when the UTL
 * recorded a CRS completion.
 *
 * Fixed: the CRS early-return path used to leave GPL_DMER_MASK_DISA
 * set, suppressing machine checks for everything that ran afterwards;
 * the saved DCRO_PEGPL_CFG value is now restored on that path too.
 */
static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
				    int offset, int len, u32 *val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct ppc4xx_pciex_port *port =
		&ppc4xx_pciex_ports[hose->indirect_type];
	void __iomem *addr;
	u32 gpl_cfg;

	BUG_ON(hose != port->hose);

	if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = ppc4xx_pciex_get_config_base(port, bus, devfn);

	/*
	 * Reading from configuration space of non-existing device can
	 * generate transaction errors. For the read duration we suppress
	 * assertion of machine check exceptions to avoid those.
	 */
	gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);

	/* Make sure no CRS is recorded */
	out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000);

	switch (len) {
	case 1:
		*val = in_8((u8 *)(addr + offset));
		break;
	case 2:
		*val = in_le16((u16 *)(addr + offset));
		break;
	default:
		*val = in_le32((u32 *)(addr + offset));
		break;
	}

	pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x"
		 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
		 bus->number, hose->first_busno, hose->last_busno,
		 devfn, offset, len, addr + offset, *val);

	/* Check for CRS (440SPe rev B does that for us but heh ..) */
	if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) {
		pr_debug("Got CRS !\n");
		if (len != 4 || offset != 0) {
			/* Restore machine check assertion before bailing */
			dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
			return PCIBIOS_DEVICE_NOT_FOUND;
		}
		*val = 0xffff0001;
	}

	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);

	return PCIBIOS_SUCCESSFUL;
}
1668 
/*
 * pci_ops write hook: performs a config-space write with machine-check
 * exceptions suppressed for the duration, mirroring the read path.
 */
static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
				     int offset, int len, u32 val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct ppc4xx_pciex_port *port =
		&ppc4xx_pciex_ports[hose->indirect_type];
	void __iomem *addr;
	u32 gpl_cfg;

	if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = ppc4xx_pciex_get_config_base(port, bus, devfn);

	/*
	 * Reading from configuration space of non-existing device can
	 * generate transaction errors. For the read duration we suppress
	 * assertion of machine check exceptions to avoid those.
	 */
	gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);

	pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
		 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
		 bus->number, hose->first_busno, hose->last_busno,
		 devfn, offset, len, addr + offset, val);

	switch (len) {
	case 1:
		out_8((u8 *)(addr + offset), val);
		break;
	case 2:
		out_le16((u16 *)(addr + offset), val);
		break;
	default:
		out_le32((u32 *)(addr + offset), val);
		break;
	}

	/* Restore machine check assertion */
	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);

	return PCIBIOS_SUCCESSFUL;
}
1712 
/* Config access ops registered with the PCI core for PCIe hoses */
static struct pci_ops ppc4xx_pciex_pci_ops =
{
	.read  = ppc4xx_pciex_read_config,
	.write = ppc4xx_pciex_write_config,
};
1718 
/*
 * Program one PCIe outbound mapping (POM/OMR) region.
 *
 * index 0 and 1 are memory windows (min 1MB), index 2 is the I/O
 * window (min 256 bytes). size must be a power of two and plb_addr
 * must be size-aligned. Returns 0 on success, -1 on a range error.
 */
static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port	*port,
					     struct pci_controller	*hose,
					     void __iomem		*mbase,
					     u64			plb_addr,
					     u64			pci_addr,
					     u64			size,
					     unsigned int		flags,
					     int			index)
{
	u32 lah, lal, pciah, pcial, sa;

	if (!is_power_of_2(size) ||
	    (index < 2 && size < 0x100000) ||
	    (index == 2 && size < 0x100) ||
	    (plb_addr & (size - 1)) != 0) {
		printk(KERN_WARNING "%s: Resource out of range\n",
		       hose->dn->full_name);
		return -1;
	}

	/* Calculate register values */
	lah = RES_TO_U32_HIGH(plb_addr);
	lal = RES_TO_U32_LOW(plb_addr);
	pciah = RES_TO_U32_HIGH(pci_addr);
	pcial = RES_TO_U32_LOW(pci_addr);
	/* Size mask with the valid bit set */
	sa = (0xffffffffu << ilog2(size)) | 0x1;

	/* Program register values */
	switch (index) {
	case 0:
		out_le32(mbase + PECFG_POM0LAH, pciah);
		out_le32(mbase + PECFG_POM0LAL, pcial);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
		/*Enabled and single region */
		if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
				sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT
					| DCRO_PEGPL_OMRxMSKL_VAL);
		else if (of_device_is_compatible(
				port->node, "ibm,plb-pciex-476fpe") ||
			of_device_is_compatible(
				port->node, "ibm,plb-pciex-476gtr"))
			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
				sa | DCRO_PEGPL_476FPE_OMR1MSKL_UOT
					| DCRO_PEGPL_OMRxMSKL_VAL);
		else
			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
				sa | DCRO_PEGPL_OMR1MSKL_UOT
					| DCRO_PEGPL_OMRxMSKL_VAL);
		break;
	case 1:
		out_le32(mbase + PECFG_POM1LAH, pciah);
		out_le32(mbase + PECFG_POM1LAL, pcial);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL,
				sa | DCRO_PEGPL_OMRxMSKL_VAL);
		break;
	case 2:
		out_le32(mbase + PECFG_POM2LAH, pciah);
		out_le32(mbase + PECFG_POM2LAL, pcial);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
		/* Note that 3 here means enabled | IO space !!! */
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL,
				sa | DCRO_PEGPL_OMR3MSKL_IO
					| DCRO_PEGPL_OMRxMSKL_VAL);
		break;
	}

	return 0;
}
1795 
/*
 * Set up all outbound windows for a port from the hose's resources:
 * up to two memory windows (indices 0-1), an ISA memory hole fallback
 * if a slot remains free, and the I/O window which must be region 2.
 */
static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
					       struct pci_controller *hose,
					       void __iomem *mbase)
{
	int i, j, found_isa_hole = 0;

	/* Setup outbound memory windows */
	for (i = j = 0; i < 3; i++) {
		struct resource *res = &hose->mem_resources[i];
		resource_size_t offset = hose->mem_offset[i];

		/* we only care about memory windows */
		if (!(res->flags & IORESOURCE_MEM))
			continue;
		if (j > 1) {
			printk(KERN_WARNING "%s: Too many ranges\n",
			       port->node->full_name);
			break;
		}

		/* Configure the resource */
		if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
					       res->start,
					       res->start - offset,
					       resource_size(res),
					       res->flags,
					       j) == 0) {
			j++;

			/* If the resource PCI address is 0 then we have our
			 * ISA memory hole
			 */
			if (res->start == offset)
				found_isa_hole = 1;
		}
	}

	/* Handle ISA memory hole if not already covered */
	if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
		if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
					       hose->isa_mem_phys, 0,
					       hose->isa_mem_size, 0, j) == 0)
			printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
			       hose->dn->full_name);

	/* Configure IO, always 64K starting at 0. We hard wire it to 64K !
	 * Note also that it -has- to be region index 2 on this HW
	 */
	if (hose->io_resource.flags & IORESOURCE_IO)
		ppc4xx_setup_one_pciex_POM(port, hose, mbase,
					   hose->io_base_phys, 0,
					   0x10000, IORESOURCE_IO, 2);
}
1849 
/*
 * Set up inbound mapping (PIM/BAR0) for a port. In endpoint mode a
 * fixed 32MB window to PLB address 0 is exposed via BAR0; in root-port
 * mode BAR0 covers the given DMA resource. Finally enables inbound
 * mapping and I/O / memory / bus-master cycles in PCI_COMMAND.
 */
static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
					       struct pci_controller *hose,
					       void __iomem *mbase,
					       struct resource *res)
{
	resource_size_t size = resource_size(res);
	u64 sa;

	if (port->endpoint) {
		resource_size_t ep_addr = 0;
		resource_size_t ep_size = 32 << 20;

		/* Currently we map a fixed 64MByte window to PLB address
		 * 0 (SDRAM). This should probably be configurable via a dts
		 * property.
		 */
		/* NOTE(review): comment says 64MB but ep_size is 32MB
		 * (32 << 20) -- confirm which is intended */

		/* Calculate window size */
		sa = (0xffffffffffffffffull << ilog2(ep_size));

		/* Setup BAR0 */
		out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
		out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
			 PCI_BASE_ADDRESS_MEM_TYPE_64);

		/* Disable BAR1 & BAR2 */
		out_le32(mbase + PECFG_BAR1MPA, 0);
		out_le32(mbase + PECFG_BAR2HMPA, 0);
		out_le32(mbase + PECFG_BAR2LMPA, 0);

		out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
		out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));

		out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
		out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
	} else {
		/* Calculate window size */
		sa = (0xffffffffffffffffull << ilog2(size));
		if (res->flags & IORESOURCE_PREFETCH)
			sa |= PCI_BASE_ADDRESS_MEM_PREFETCH;

		if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx") ||
		    of_device_is_compatible(
			    port->node, "ibm,plb-pciex-476fpe") ||
		    of_device_is_compatible(
			    port->node, "ibm,plb-pciex-476gtr"))
			sa |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
		out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));

		/* The setup of the split looks weird to me ... let's see
		 * if it works
		 */
		out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
		out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
		out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
		out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
		out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
		out_le32(mbase + PECFG_PIM01SAL, 0x00000000);

		out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
		out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
	}

	/* Enable inbound mapping */
	out_le32(mbase + PECFG_PIMEN, 0x1);

	/* Enable I/O, Mem, and Busmaster cycles */
	out_le16(mbase + PCI_COMMAND,
		 in_le16(mbase + PCI_COMMAND) |
		 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
}
1923 
/* Build and register the Linux pci_controller ("hose") for one 4xx PCIe
 * port: map its config spaces, program outbound (POM) and inbound (PIM)
 * translation windows, and give the bridge usable IDs and a class code.
 *
 * On any failure everything acquired so far is released and the function
 * returns silently (after an error printk); the port is simply left
 * without a hose.
 */
static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
{
	struct resource dma_window;
	struct pci_controller *hose = NULL;
	const int *bus_range;
	int primary = 0, busses;
	void __iomem *mbase = NULL, *cfg_data = NULL;
	const u32 *pval;
	u32 val;

	/* Check if primary bridge */
	if (of_get_property(port->node, "primary", NULL))
		primary = 1;

	/* Get bus range if any */
	bus_range = of_get_property(port->node, "bus-range", NULL);

	/* Allocate the host controller data structure */
	hose = pcibios_alloc_controller(port->node);
	if (!hose)
		goto fail;

	/* We stick the port number in "indirect_type" so the config space
	 * ops can retrieve the port data structure easily
	 */
	hose->indirect_type = port->index;

	/* Get bus range */
	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	/* Because of how big mapping the config space is (1M per bus), we
	 * limit how many busses we support. In the long run, we could replace
	 * that with something akin to kmap_atomic instead. We set aside 1 bus
	 * for the host itself too.
	 */
	busses = hose->last_busno - hose->first_busno; /* This is off by 1 */
	if (busses > MAX_PCIE_BUS_MAPPED) {
		busses = MAX_PCIE_BUS_MAPPED;
		hose->last_busno = hose->first_busno + busses;
	}

	if (!port->endpoint) {
		/* Only map the external config space in cfg_data for
		 * PCIe root-complexes. External space is 1M per bus
		 * (skipping bus first_busno, which is the host bridge itself).
		 */
		cfg_data = ioremap(port->cfg_space.start +
				   (hose->first_busno + 1) * 0x100000,
				   busses * 0x100000);
		if (cfg_data == NULL) {
			printk(KERN_ERR "%s: Can't map external config space !",
			       port->node->full_name);
			goto fail;
		}
		hose->cfg_data = cfg_data;
	}

	/* Always map the host config space in cfg_addr.
	 * Internal space is 4K
	 */
	mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
	if (mbase == NULL) {
		printk(KERN_ERR "%s: Can't map internal config space !",
		       port->node->full_name);
		goto fail;
	}
	hose->cfg_addr = mbase;

	pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name,
		 hose->first_busno, hose->last_busno);
	pr_debug("     config space mapped at: root @0x%p, other @0x%p\n",
		 hose->cfg_addr, hose->cfg_data);

	/* Setup config space */
	hose->ops = &ppc4xx_pciex_pci_ops;
	port->hose = hose;
	mbase = (void __iomem *)hose->cfg_addr;

	if (!port->endpoint) {
		/*
		 * Set bus numbers on our root port
		 */
		out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
		out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
		out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
	}

	/*
	 * OMRs are already reset, also disable PIMs
	 */
	out_le32(mbase + PECFG_PIMEN, 0);

	/* Parse outbound mapping resources */
	pci_process_bridge_OF_ranges(hose, port->node, primary);

	/* Parse inbound mapping resources */
	if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
		goto fail;

	/* Configure outbound ranges POMs */
	ppc4xx_configure_pciex_POMs(port, hose, mbase);

	/* Configure inbound ranges PIMs */
	ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);

	/* The root complex doesn't show up if we don't set some vendor
	 * and device IDs into it. The defaults below are the same bogus
	 * one that the initial code in arch/ppc had. This can be
	 * overwritten by setting the "vendor-id/device-id" properties
	 * in the pciex node.
	 */

	/* Get the (optional) vendor-/device-id from the device-tree */
	pval = of_get_property(port->node, "vendor-id", NULL);
	if (pval) {
		val = *pval;
	} else {
		/* Bogus default IDs, distinct for RC vs. endpoint mode */
		if (!port->endpoint)
			val = 0xaaa0 + port->index;
		else
			val = 0xeee0 + port->index;
	}
	out_le16(mbase + 0x200, val);

	pval = of_get_property(port->node, "device-id", NULL);
	if (pval) {
		val = *pval;
	} else {
		if (!port->endpoint)
			val = 0xbed0 + port->index;
		else
			val = 0xfed0 + port->index;
	}
	out_le16(mbase + 0x202, val);

	/* Enable Bus master, memory, and io space */
	if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
		out_le16(mbase + 0x204, 0x7);

	if (!port->endpoint) {
		/* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
		out_le32(mbase + 0x208, 0x06040001);

		printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
		       port->index);
	} else {
		/* Set Class Code to Processor/PPC */
		out_le32(mbase + 0x208, 0x0b200001);

		printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
		       port->index);
	}

	return;
 fail:
	/* Release whatever was acquired; NULL/unset cases are handled by
	 * the guards (hose->cfg_addr is only set once mbase is valid).
	 */
	if (hose)
		pcibios_free_controller(hose);
	if (cfg_data)
		iounmap(cfg_data);
	if (mbase)
		iounmap(mbase);
}
2086 
ppc4xx_probe_pciex_bridge(struct device_node * np)2087 static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
2088 {
2089 	struct ppc4xx_pciex_port *port;
2090 	const u32 *pval;
2091 	int portno;
2092 	unsigned int dcrs;
2093 	const char *val;
2094 
2095 	/* First, proceed to core initialization as we assume there's
2096 	 * only one PCIe core in the system
2097 	 */
2098 	if (ppc4xx_pciex_check_core_init(np))
2099 		return;
2100 
2101 	/* Get the port number from the device-tree */
2102 	pval = of_get_property(np, "port", NULL);
2103 	if (pval == NULL) {
2104 		printk(KERN_ERR "PCIE: Can't find port number for %s\n",
2105 		       np->full_name);
2106 		return;
2107 	}
2108 	portno = *pval;
2109 	if (portno >= ppc4xx_pciex_port_count) {
2110 		printk(KERN_ERR "PCIE: port number out of range for %s\n",
2111 		       np->full_name);
2112 		return;
2113 	}
2114 	port = &ppc4xx_pciex_ports[portno];
2115 	port->index = portno;
2116 
2117 	/*
2118 	 * Check if device is enabled
2119 	 */
2120 	if (!of_device_is_available(np)) {
2121 		printk(KERN_INFO "PCIE%d: Port disabled via device-tree\n", port->index);
2122 		return;
2123 	}
2124 
2125 	port->node = of_node_get(np);
2126 	if (ppc4xx_pciex_hwops->want_sdr) {
2127 		pval = of_get_property(np, "sdr-base", NULL);
2128 		if (pval == NULL) {
2129 			printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
2130 			       np->full_name);
2131 			return;
2132 		}
2133 		port->sdr_base = *pval;
2134 	}
2135 
2136 	/* Check if device_type property is set to "pci" or "pci-endpoint".
2137 	 * Resulting from this setup this PCIe port will be configured
2138 	 * as root-complex or as endpoint.
2139 	 */
2140 	val = of_get_property(port->node, "device_type", NULL);
2141 	if (!strcmp(val, "pci-endpoint")) {
2142 		port->endpoint = 1;
2143 	} else if (!strcmp(val, "pci")) {
2144 		port->endpoint = 0;
2145 	} else {
2146 		printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n",
2147 		       np->full_name);
2148 		return;
2149 	}
2150 
2151 	/* Fetch config space registers address */
2152 	if (of_address_to_resource(np, 0, &port->cfg_space)) {
2153 		printk(KERN_ERR "%s: Can't get PCI-E config space !",
2154 		       np->full_name);
2155 		return;
2156 	}
2157 	/* Fetch host bridge internal registers address */
2158 	if (of_address_to_resource(np, 1, &port->utl_regs)) {
2159 		printk(KERN_ERR "%s: Can't get UTL register base !",
2160 		       np->full_name);
2161 		return;
2162 	}
2163 
2164 	/* Map DCRs */
2165 	dcrs = dcr_resource_start(np, 0);
2166 	if (dcrs == 0) {
2167 		printk(KERN_ERR "%s: Can't get DCR register base !",
2168 		       np->full_name);
2169 		return;
2170 	}
2171 	port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
2172 
2173 	/* Initialize the port specific registers */
2174 	if (ppc4xx_pciex_port_init(port)) {
2175 		printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index);
2176 		return;
2177 	}
2178 
2179 	/* Setup the linux hose data structure */
2180 	ppc4xx_pciex_port_setup_hose(port);
2181 }
2182 
2183 #endif /* CONFIG_PPC4xx_PCI_EXPRESS */
2184 
ppc4xx_pci_find_bridges(void)2185 static int __init ppc4xx_pci_find_bridges(void)
2186 {
2187 	struct device_node *np;
2188 
2189 	pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
2190 
2191 #ifdef CONFIG_PPC4xx_PCI_EXPRESS
2192 	for_each_compatible_node(np, NULL, "ibm,plb-pciex")
2193 		ppc4xx_probe_pciex_bridge(np);
2194 #endif
2195 	for_each_compatible_node(np, NULL, "ibm,plb-pcix")
2196 		ppc4xx_probe_pcix_bridge(np);
2197 	for_each_compatible_node(np, NULL, "ibm,plb-pci")
2198 		ppc4xx_probe_pci_bridge(np);
2199 
2200 	return 0;
2201 }
2202 arch_initcall(ppc4xx_pci_find_bridges);
2203 
2204