/*
 * Broadcom specific AMBA
 * PCI Core in hostmode
 *
 * Copyright 2005 - 2011, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
#include <asm/paccess.h>

/* Probe a 32bit value on the bus and catch bus exceptions.
 * Returns nonzero on a bus exception.
 * This is MIPS specific */
#define mips_busprobe32(val, addr)	get_dbe((val), ((u32 *)(addr)))

/* Assume one-hot slot wiring */
#define BCMA_PCI_SLOT_MAX	16
#define	PCI_CONFIG_SPACE_SIZE	256

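/* Check whether the PCI(e) core acts as a host (root complex). Only the
 * 0x47xx/0x53xx SoC families can host a PCI bus; on those, enable the core
 * and probe a 32-bit read on its I/O space. A bus exception means the core
 * is not in host mode.
 */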
bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	u16 chipid_top;
	u32 tmp;

	chipid_top = (bus->chipinfo.id & 0xFF00);
	if (chipid_top != 0x4700 &&
	    chipid_top != 0x5300)
		return false;

	bcma_core_enable(pc->core, 0);

	return !mips_busprobe32(tmp, pc->core->io_addr);
}

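/* Indirect access to the PCIe core's configuration space through the
 * CONFIG_ADDR/CONFIG_DATA register pair; used below for the host bridge's
 * registers beyond the first 256 bytes. The read-back of CONFIG_ADDR makes
 * sure the address write has reached the core before the data access.
 */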
static u32 bcma_pcie_read_config(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_DATA);
}

static void bcma_pcie_write_config(struct bcma_drv_pci *pc, u32 address,
				   u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_DATA, data);
}

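/* Compute the backplane address of an external device's config space.
 * Only one external slot (dev 1) is supported; the SBTOPCI1 window is slid
 * to the Type 0 config region first. Returns 0 if the PCIe link is down or
 * the device number is out of range.
 */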
static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
			     unsigned int func, unsigned int off)
{
	u32 addr = 0;

	/* Issue config commands only when the data link is up (at least
	 * one external PCIe device is present).
	 */
	if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG)
			  & BCMA_CORE_PCI_DLLP_LSREG_LINKUP))
		goto out;

	/* Type 0 transaction */
	/* Slide the PCI window to the appropriate slot */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
	/* Calculate the address */
	addr = pc->host_controller->host_cfg_addr;
	addr |= (dev << BCMA_CORE_PCI_CFG_SLOT_SHIFT);
	addr |= (func << BCMA_CORE_PCI_CFG_FUN_SHIFT);
	addr |= (off & ~3);

out:
	return addr;
}

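/* Read 1, 2 or 4 bytes from the config space of (dev, func) at offset off.
 * dev 0 is the host bridge itself: offsets below 256 go through the core's
 * PCICFG0 register space, the extended config space uses the indirect
 * access above. Other devices are read through an ioremapped config
 * window, with mips_busprobe32() catching bus errors from absent devices.
 */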
static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
				  unsigned int func, unsigned int off,
				  void *buf, int len)
{
	int err = -EINVAL;
	u32 addr, val;
	void __iomem *mmio = NULL;

	WARN_ON(!pc->hostmode);
	if (unlikely(len != 1 && len != 2 && len != 4))
		goto out;
	if (dev == 0) {
		/* we support only two functions on device 0 */
		if (func > 1)
			goto out;

		/* accesses to config registers with offsets >= 256
		 * require indirect access.
		 */
		if (off >= PCI_CONFIG_SPACE_SIZE) {
			addr = (func << 12);
			addr |= (off & 0x0FFC);
			val = bcma_pcie_read_config(pc, addr);
		} else {
			addr = BCMA_CORE_PCI_PCICFG0;
			addr |= (func << 8);
			addr |= (off & 0xFC);
			val = pcicore_read32(pc, addr);
		}
	} else {
		addr = bcma_get_cfgspace_addr(pc, dev, func, off);
		if (unlikely(!addr))
			goto out;
		err = -ENOMEM;
		mmio = ioremap(addr, sizeof(val));
		if (!mmio)
			goto out;

		if (mips_busprobe32(val, mmio)) {
			val = 0xFFFFFFFF;
			goto unmap;
		}
	}
	val >>= (8 * (off & 3));

	switch (len) {
	case 1:
		*((u8 *)buf) = (u8)val;
		break;
	case 2:
		*((u16 *)buf) = (u16)val;
		break;
	case 4:
		*((u32 *)buf) = (u32)val;
		break;
	}
	err = 0;
unmap:
	if (mmio)
		iounmap(mmio);
out:
	return err;
}

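/* Write 1, 2 or 4 bytes to the config space of (dev, func) at offset off.
 * Sub-dword writes are done as a read-modify-write of the containing
 * 32-bit word. On BCM4716/BCM4748 the write is read back to flush it to
 * the device.
 */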
static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
				   unsigned int func, unsigned int off,
				   const void *buf, int len)
{
	int err = -EINVAL;
	u32 addr, val;
	void __iomem *mmio = NULL;
	u16 chipid = pc->core->bus->chipinfo.id;

	WARN_ON(!pc->hostmode);
	if (unlikely(len != 1 && len != 2 && len != 4))
		goto out;
	if (dev == 0) {
		/* we support only two functions on device 0 */
		if (func > 1)
			goto out;

		/* accesses to config registers with offsets >= 256
		 * require indirect access.
		 */
		if (off >= PCI_CONFIG_SPACE_SIZE) {
			addr = (func << 12);
			addr |= (off & 0x0FFC);
			val = bcma_pcie_read_config(pc, addr);
		} else {
			addr = BCMA_CORE_PCI_PCICFG0;
			addr |= (func << 8);
			addr |= (off & 0xFC);
			val = pcicore_read32(pc, addr);
		}
	} else {
		addr = bcma_get_cfgspace_addr(pc, dev, func, off);
		if (unlikely(!addr))
			goto out;
		err = -ENOMEM;
		mmio = ioremap(addr, sizeof(val));
		if (!mmio)
			goto out;

		if (mips_busprobe32(val, mmio)) {
			val = 0xFFFFFFFF;
			goto unmap;
		}
	}

	switch (len) {
	case 1:
		val &= ~(0xFF << (8 * (off & 3)));
		val |= *((const u8 *)buf) << (8 * (off & 3));
		break;
	case 2:
		val &= ~(0xFFFF << (8 * (off & 3)));
		val |= *((const u16 *)buf) << (8 * (off & 3));
		break;
	case 4:
		val = *((const u32 *)buf);
		break;
	}
	if (dev == 0) {
		/* accesses to config registers with offsets >= 256
		 * require indirect access.
		 */
		if (off >= PCI_CONFIG_SPACE_SIZE)
			bcma_pcie_write_config(pc, addr, val);
		else
			pcicore_write32(pc, addr, val);
	} else {
		writel(val, mmio);

		if (chipid == BCMA_CHIP_ID_BCM4716 ||
		    chipid == BCMA_CHIP_ID_BCM4748)
			readl(mmio);
	}

	err = 0;
unmap:
	if (mmio)
		iounmap(mmio);
out:
	return err;
}

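/* pci_ops callbacks used by the generic PCI layer. Config space accesses
 * are serialized with the cfgspace_lock spinlock and errors are mapped to
 * PCIBIOS return codes.
 */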
static int bcma_core_pci_hostmode_read_config(struct pci_bus *bus,
					      unsigned int devfn,
					      int reg, int size, u32 *val)
{
	unsigned long flags;
	int err;
	struct bcma_drv_pci *pc;
	struct bcma_drv_pci_host *pc_host;

	pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
	pc = pc_host->pdev;

	spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
	err = bcma_extpci_read_config(pc, PCI_SLOT(devfn),
				     PCI_FUNC(devfn), reg, val, size);
	spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

	return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

static int bcma_core_pci_hostmode_write_config(struct pci_bus *bus,
					       unsigned int devfn,
					       int reg, int size, u32 val)
{
	unsigned long flags;
	int err;
	struct bcma_drv_pci *pc;
	struct bcma_drv_pci_host *pc_host;

	pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
	pc = pc_host->pdev;

	spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
	err = bcma_extpci_write_config(pc, PCI_SLOT(devfn),
				      PCI_FUNC(devfn), reg, &val, size);
	spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

	return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

/* return cap_offset if requested capability exists in the PCI config space */
static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
				   unsigned int func, u8 req_cap_id,
				   unsigned char *buf, u32 *buflen)
{
	u8 cap_id;
	u8 cap_ptr = 0;
	u32 bufsize;
	u8 byte_val;

	/* check for Header type 0 */
	bcma_extpci_read_config(pc, dev, func, PCI_HEADER_TYPE, &byte_val,
				sizeof(u8));
	if ((byte_val & 0x7F) != PCI_HEADER_TYPE_NORMAL)
		return cap_ptr;

	/* check if the capability pointer field exists */
	bcma_extpci_read_config(pc, dev, func, PCI_STATUS, &byte_val,
				sizeof(u8));
	if (!(byte_val & PCI_STATUS_CAP_LIST))
		return cap_ptr;

	/* check if the capability pointer is 0x00 */
	bcma_extpci_read_config(pc, dev, func, PCI_CAPABILITY_LIST, &cap_ptr,
				sizeof(u8));
	if (cap_ptr == 0x00)
		return cap_ptr;

	/* loop through the capability list and see if the requested
	 * capability exists */
	bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id, sizeof(u8));
	while (cap_id != req_cap_id) {
		bcma_extpci_read_config(pc, dev, func, cap_ptr + 1, &cap_ptr,
					sizeof(u8));
		if (cap_ptr == 0x00)
			return cap_ptr;
		bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id,
					sizeof(u8));
	}

	/* found the caller requested capability */
	if ((buf != NULL) && (buflen != NULL)) {
		u8 cap_data;

		bufsize = *buflen;
		if (!bufsize)
			return cap_ptr;

		*buflen = 0;

		/* copy the capability data excluding cap ID and next ptr */
		cap_data = cap_ptr + 2;
		if ((bufsize + cap_data) > PCI_CONFIG_SPACE_SIZE)
			bufsize = PCI_CONFIG_SPACE_SIZE - cap_data;
		*buflen = bufsize;
		while (bufsize--) {
			bcma_extpci_read_config(pc, dev, func, cap_data, buf,
						sizeof(u8));
			cap_data++;
			buf++;
		}
	}

	return cap_ptr;
}

/* If the root port is capable of returning Config Request
 * Retry Status (CRS) Completion Status to software then
 * enable the feature.
 */
static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	u8 cap_ptr, root_ctrl, root_cap, dev;
	u16 val16;
	int i;

	cap_ptr = bcma_find_pci_capability(pc, 0, 0, PCI_CAP_ID_EXP, NULL,
					   NULL);
	root_cap = cap_ptr + PCI_EXP_RTCAP;
	bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
	if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
		/* Enable CRS software visibility */
		root_ctrl = cap_ptr + PCI_EXP_RTCTL;
		val16 = PCI_EXP_RTCTL_CRSSVE;
		bcma_extpci_read_config(pc, 0, 0, root_ctrl, &val16,
					sizeof(u16));

		/* Initiate a configuration request to read the vendor id
		 * field of the device function's config space header after
		 * 100 ms wait time from the end of Reset. If the device is
		 * not done with its internal initialization, it must at
		 * least return a completion TLP, with a completion status
		 * of "Configuration Request Retry Status (CRS)". The root
		 * complex must complete the request to the host by returning
		 * a read-data value of 0001h for the Vendor ID field and
		 * all 1s for any additional bytes included in the request.
		 * Poll using the config reads for max wait time of 1 sec or
		 * until we receive the successful completion status. Repeat
		 * the procedure for all the devices.
		 */
		for (dev = 1; dev < BCMA_PCI_SLOT_MAX; dev++) {
			for (i = 0; i < 100000; i++) {
				bcma_extpci_read_config(pc, dev, 0,
							PCI_VENDOR_ID, &val16,
							sizeof(val16));
				if (val16 != 0x1)
					break;
				udelay(10);
			}
			if (val16 == 0x1)
				bcma_err(bus, "PCI: Broken device in slot %d\n",
					 dev);
		}
	}
}

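/* Set up the PCIe core as a host controller: allocate the controller
 * state, program the SBTOPCI address translation windows for the chip,
 * reset the link, enable CRS visibility and interrupts, and register the
 * controller with the (MIPS) PCI subsystem.
 */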
void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	struct bcma_drv_pci_host *pc_host;
	u32 tmp;
	u32 pci_membase_1G;
	unsigned long io_map_base;

	bcma_info(bus, "PCIEcore in host mode found\n");

	if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
		bcma_info(bus, "This PCIE core is disabled and not working\n");
		return;
	}

	pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
	if (!pc_host) {
		bcma_err(bus, "cannot allocate memory\n");
		return;
	}

	spin_lock_init(&pc_host->cfgspace_lock);

	pc->host_controller = pc_host;
	pc_host->pci_controller.io_resource = &pc_host->io_resource;
	pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
	pc_host->pci_controller.pci_ops = &pc_host->pci_ops;
	pc_host->pdev = pc;

	pci_membase_1G = BCMA_SOC_PCI_DMA;
	pc_host->host_cfg_addr = BCMA_SOC_PCI_CFG;

	pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config;
	pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config;

	pc_host->mem_resource.name = "BCMA PCIcore external memory";
	pc_host->mem_resource.start = BCMA_SOC_PCI_DMA;
	pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1;
	pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED;

	pc_host->io_resource.name = "BCMA PCIcore external I/O";
	pc_host->io_resource.start = 0x100;
	pc_host->io_resource.end = 0x7FF;
	pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;

	/* Reset RC */
	usleep_range(3000, 5000);
	pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
	msleep(50);
	pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
			BCMA_CORE_PCI_CTL_RST_OE);

	/* 64 MB I/O access window. On 4716, use
	 * sbtopcie0 to access the device registers. We
	 * can't use address match 2 (1 GB window) region
	 * as mips can't generate 64-bit address on the
	 * backplane.
	 */
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
		pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
		pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
					    BCMA_SOC_PCI_MEM_SZ - 1;
		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
				BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
	} else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
		tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
		tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
		tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
		if (pc->core->core_unit == 0) {
			pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
			pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
						    BCMA_SOC_PCI_MEM_SZ - 1;
			pc_host->io_resource.start = 0x100;
			pc_host->io_resource.end = 0x47F;
			pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
					tmp | BCMA_SOC_PCI_MEM);
		} else if (pc->core->core_unit == 1) {
			pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
			pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
						    BCMA_SOC_PCI_MEM_SZ - 1;
			pc_host->io_resource.start = 0x480;
			pc_host->io_resource.end = 0x7FF;
			pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
			pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
					tmp | BCMA_SOC_PCI1_MEM);
		}
	} else
		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
				BCMA_CORE_PCI_SBTOPCI_IO);

	/* 64 MB configuration access window */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);

	/* 1 GB memory access window */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI2,
			BCMA_CORE_PCI_SBTOPCI_MEM | pci_membase_1G);

	/* As per PCI Express Base Spec 1.1 we need to wait for
	 * at least 100 ms from the end of a reset (cold/warm/hot)
	 * before issuing configuration requests to PCI Express
	 * devices.
	 */
	msleep(100);

	bcma_core_pci_enable_crs(pc);

	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4716) {
		u16 val16;
		bcma_extpci_read_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
					&val16, sizeof(val16));
		val16 |= (2 << 5);	/* Max payload size of 512 */
		val16 |= (2 << 12);	/* MRRS 512 */
		bcma_extpci_write_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
					 &val16, sizeof(val16));
	}

	/* Enable PCI bridge BAR0 memory & master access */
	tmp = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	bcma_extpci_write_config(pc, 0, 0, PCI_COMMAND, &tmp, sizeof(tmp));

	/* Enable PCI interrupts */
	pcicore_write32(pc, BCMA_CORE_PCI_IMASK, BCMA_CORE_PCI_IMASK_INTA);

	/* Ok, ready to run, register it to the system.
	 * The following needs to change if we want to port hostmode
	 * to a non-MIPS platform. */
	io_map_base = (unsigned long)ioremap(pc_host->mem_resource.start,
					     resource_size(&pc_host->mem_resource));
	pc_host->pci_controller.io_map_base = io_map_base;
	set_io_port_base(pc_host->pci_controller.io_map_base);
	/* Give the PCI controller some time to configure itself with the new
	 * values. Not waiting at this point causes crashes of the machine. */
	usleep_range(10000, 15000);
	register_pci_controller(&pc_host->pci_controller);
}

/* Early PCI fixup for a device on the PCI-core bridge. */
static void bcma_core_pci_fixup_pcibridge(struct pci_dev *dev)
{
	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return;
	}
	if (PCI_SLOT(dev->devfn) != 0)
		return;

	pr_info("PCI: Fixing up bridge %s\n", pci_name(dev));

	/* Enable PCI bridge bus mastering and memory space */
	pci_set_master(dev);
	if (pcibios_enable_device(dev, ~0) < 0) {
		pr_err("PCI: BCMA bridge enable failed\n");
		return;
	}

	/* Enable PCI bridge BAR1 prefetch and burst */
	pci_write_config_dword(dev, BCMA_PCI_BAR1_CONTROL, 3);
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_pcibridge);

/* Early PCI fixup for all PCI-cores to set the correct memory address. */
static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
{
	struct resource *res;
	int pos, err;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return;
	}
	if (PCI_SLOT(dev->devfn) == 0)
		return;

	pr_info("PCI: Fixing up addresses %s\n", pci_name(dev));

	for (pos = 0; pos < 6; pos++) {
		res = &dev->resource[pos];
		if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) {
			err = pci_assign_resource(dev, pos);
			if (err)
				pr_err("PCI: Problem fixing up the addresses on %s\n",
				       pci_name(dev));
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);

/* This function is called when doing a pci_enable_device().
 * We must first check if the device is a device on the PCI-core bridge. */
int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
{
	struct bcma_drv_pci_host *pc_host;
	int readrq;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return -ENODEV;
	}
	pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
			       pci_ops);

	pr_info("PCI: Fixing up device %s\n", pci_name(dev));

	/* Fix up interrupt lines */
	dev->irq = bcma_core_irq(pc_host->pdev->core, 0);
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);

	readrq = pcie_get_readrq(dev);
	if (readrq > 128) {
		pr_info("change PCIe max read request size from %i to 128\n", readrq);
		pcie_set_readrq(dev, 128);
	}
	return 0;
}
EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);

/* PCI device IRQ mapping. */
int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
{
	struct bcma_drv_pci_host *pc_host;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return -ENODEV;
	}

	pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
			       pci_ops);
	return bcma_core_irq(pc_host->pdev->core, 0);
}
EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq);
624