/*
 *	linux/arch/alpha/kernel/pci.c
 *
 * Extruded from code written by
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 */

/* 2.3.x PCI/resources, 1999 Andrea Arcangeli <andrea@suse.de> */

/*
 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     PCI-PCI bridges cleanup
 */
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <asm/machvec.h>

#include "proto.h"
#include "pci_impl.h"


/*
 * Some string constants used by the various core logics.
 */

const char *const pci_io_names[] = {
	"PCI IO bus 0", "PCI IO bus 1", "PCI IO bus 2", "PCI IO bus 3",
	"PCI IO bus 4", "PCI IO bus 5", "PCI IO bus 6", "PCI IO bus 7"
};

const char *const pci_mem_names[] = {
	"PCI mem bus 0", "PCI mem bus 1", "PCI mem bus 2", "PCI mem bus 3",
	"PCI mem bus 4", "PCI mem bus 5", "PCI mem bus 6", "PCI mem bus 7"
};

const char pci_hae0_name[] = "HAE0";

/*
 * Indicate whether we respect the PCI setup left by console.
 * Make this long-lived so that we know when shutting down
 * whether we probed only or not.
 */
int pci_probe_only;

/*
 * The PCI controller list.
 */

struct pci_controller *hose_head, **hose_tail = &hose_head;
struct pci_controller *pci_isa_hose;

/*
 * Quirks.
 */
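
/* Force the Intel 82378 (SIO) to be classified as an ISA bridge,
   whatever class code it reports, so that the ISA-bridge handling
   elsewhere (e.g. pcibios_fixup_final below) picks it up.  */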
static void __init
quirk_isa_bridge(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_ISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge);

static void __init
quirk_cypress(struct pci_dev *dev)
{
	/* The Notorious Cy82C693 chip.  */

	/* The generic legacy mode IDE fixup in drivers/pci/probe.c
	   doesn't work correctly with the Cypress IDE controller as
	   it has non-standard register layout.  Fix that.  */
	if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) {
		dev->resource[2].start = dev->resource[3].start = 0;
		dev->resource[2].end = dev->resource[3].end = 0;
		dev->resource[2].flags = dev->resource[3].flags = 0;
		if (PCI_FUNC(dev->devfn) == 2) {
			dev->resource[0].start = 0x170;
			dev->resource[0].end = 0x177;
			dev->resource[1].start = 0x376;
			dev->resource[1].end = 0x376;
		}
	}

	/* The Cypress bridge responds on the PCI bus in the address range
	   0xffff0000-0xffffffff (conventional x86 BIOS ROM).  There is no
	   way to turn this off.  The bridge also supports several extended
	   BIOS ranges (disabled after power-up), and some consoles do turn
	   them on.  So if we use a large direct-map window, or a large SG
	   window, we must avoid the entire 0xfff00000-0xffffffff region.  */
	if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) {
		if (__direct_map_base + __direct_map_size >= 0xfff00000UL)
			__direct_map_size = 0xfff00000UL - __direct_map_base;
		else {
			struct pci_controller *hose = dev->sysdata;
			struct pci_iommu_arena *pci = hose->sg_pci;
			if (pci && pci->dma_base + pci->size >= 0xfff00000UL)
				pci->size = 0xfff00000UL - pci->dma_base;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, quirk_cypress);

/* Called for each device after PCI setup is done. */
static void __init
pcibios_fixup_final(struct pci_dev *dev)
{
	unsigned int class = dev->class >> 8;

	if (class == PCI_CLASS_BRIDGE_ISA || class == PCI_CLASS_BRIDGE_EISA) {
		dev->dma_mask = MAX_ISA_DMA_ADDRESS - 1;
		isa_bridge = dev;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);

/* Just declaring that the power-of-ten prefixes are actually the
   power-of-two ones doesn't make it true :) */
#define KB			1024
#define MB			(1024*KB)
#define GB			(1024*MB)
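
/* Allocation alignment hook: bump the proposed start of an I/O or
   memory resource so that it respects the per-hose minimums and, in
   sparse memory space, avoids the aliased first octant of each 128MB
   segment (see the comment in the body below).  */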
void
pcibios_align_resource(void *data, struct resource *res,
		       resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	struct pci_controller *hose = dev->sysdata;
	unsigned long alignto;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		/* Make sure we start at our min on all hoses */
		if (start - hose->io_space->start < PCIBIOS_MIN_IO)
			start = PCIBIOS_MIN_IO + hose->io_space->start;

		/*
		 * Put everything into 0x00-0xff region modulo 0x400
		 */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}
	else if	(res->flags & IORESOURCE_MEM) {
		/* Make sure we start at our min on all hoses */
		if (start - hose->mem_space->start < PCIBIOS_MIN_MEM)
			start = PCIBIOS_MIN_MEM + hose->mem_space->start;

		/*
		 * The following holds at least for the Low Cost
		 * Alpha implementation of the PCI interface:
		 *
		 * In sparse memory address space, the first
		 * octant (16MB) of every 128MB segment is
		 * aliased to the very first 16MB of the
		 * address space (i.e., it aliases the ISA
		 * memory address space).  Thus, we try to
		 * avoid allocating PCI devices in that range.
		 * Only the remaining seven octants (at most
		 * 112MB) of each segment are usable; devices
		 * that need more than 112MB of address space
		 * must be accessed through dense memory space
		 * only!
		 */

		/* Align to multiple of size of minimum base.  */
		alignto = max(0x1000UL, align);
		start = ALIGN(start, alignto);
		if (hose->sparse_mem_base && size <= 7 * 16*MB) {
			if (((start / (16*MB)) & 0x7) == 0) {
				start &= ~(128*MB - 1);
				start += 16*MB;
				start  = ALIGN(start, alignto);
			}
			if (start/(128*MB) != (start + size - 1)/(128*MB)) {
				start &= ~(128*MB - 1);
				start += (128 + 16)*MB;
				start  = ALIGN(start, alignto);
			}
		}
	}

	res->start = start;
}
#undef KB
#undef MB
#undef GB
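
/* Registered as a subsys initcall: defer to the machine vector's
   init_pci hook (on most platforms this is common_init_pci below).  */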
static int __init
pcibios_init(void)
{
	if (alpha_mv.init_pci)
		alpha_mv.init_pci();
	return 0;
}

subsys_initcall(pcibios_init);
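
/* Architecture hook for "pci=" command line options; returning the
   string unchanged means no option is consumed here.  */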
char * __devinit
pcibios_setup(char *str)
{
	return str;
}
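
/* Under the SRM console we optionally save each device's config
   space as the buses are scanned, so that the console's own PCI
   setup can be put back when the kernel shuts down.  */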
#ifdef ALPHA_RESTORE_SRM_SETUP
static struct pdev_srm_saved_conf *srm_saved_configs;

void __devinit
pdev_save_srm_config(struct pci_dev *dev)
{
	struct pdev_srm_saved_conf *tmp;
	static int printed = 0;

	if (!alpha_using_srm || pci_probe_only)
		return;

	if (!printed) {
		printk(KERN_INFO "pci: enabling save/restore of SRM state\n");
		printed = 1;
	}

	tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp) {
		printk(KERN_ERR "%s: kmalloc() failed!\n", __func__);
		return;
	}
	tmp->next = srm_saved_configs;
	tmp->dev = dev;

	pci_save_state(dev);

	srm_saved_configs = tmp;
}

void
pci_restore_srm_config(void)
{
	struct pdev_srm_saved_conf *tmp;

	/* No need to restore if probed only. */
	if (pci_probe_only)
		return;

	/* Restore SRM config. */
	for (tmp = srm_saved_configs; tmp; tmp = tmp->next) {
		pci_restore_state(tmp->dev);
	}
}
#endif
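
/* Shift a bus-relative resource into the CPU view by adding the base
   of the hose window (root) it lives in.  */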
void __devinit
pcibios_fixup_resource(struct resource *res, struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}

void __devinit
pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus)
{
	/* Update device resources.  */
	struct pci_controller *hose = (struct pci_controller *)bus->sysdata;
	int i;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (!dev->resource[i].start)
			continue;
		if (dev->resource[i].flags & IORESOURCE_IO)
			pcibios_fixup_resource(&dev->resource[i],
					       hose->io_space);
		else if (dev->resource[i].flags & IORESOURCE_MEM)
			pcibios_fixup_resource(&dev->resource[i],
					       hose->mem_space);
	}
}

void __devinit
pcibios_fixup_bus(struct pci_bus *bus)
{
	/* Propagate hose info into the subordinate devices.  */

	struct pci_controller *hose = bus->sysdata;
	struct pci_dev *dev = bus->self;

	if (!dev) {
		/* Root bus. */
		u32 pci_mem_end;
		u32 sg_base = hose->sg_pci ? hose->sg_pci->dma_base : ~0;
		unsigned long end;

		bus->resource[0] = hose->io_space;
		bus->resource[1] = hose->mem_space;

		/* Adjust hose mem_space limit to prevent PCI allocations
		   in the iommu windows. */
		pci_mem_end = min((u32)__direct_map_base, sg_base) - 1;
		end = hose->mem_space->start + pci_mem_end;
		if (hose->mem_space->end > end)
			hose->mem_space->end = end;
	} else if (pci_probe_only &&
		   (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_read_bridge_bases(bus);
		pcibios_fixup_device_resources(dev, bus);
	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pdev_save_srm_config(dev);
		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			pcibios_fixup_device_resources(dev, bus);
	}
}
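
/* Record the IRQ chosen by the swizzle/map_irq routines in the
   device's PCI_INTERRUPT_LINE config register.  */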
void __init
pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
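
/* Translate between CPU-view resources and PCI bus addresses; the two
   views differ by the start of the per-hose I/O or memory window.  */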
void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			 struct resource *res)
{
	struct pci_controller *hose = (struct pci_controller *)dev->sysdata;
	unsigned long offset = 0;

	if (res->flags & IORESOURCE_IO)
		offset = hose->io_space->start;
	else if (res->flags & IORESOURCE_MEM)
		offset = hose->mem_space->start;

	region->start = res->start - offset;
	region->end = res->end - offset;
}

void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			     struct pci_bus_region *region)
{
	struct pci_controller *hose = (struct pci_controller *)dev->sysdata;
	unsigned long offset = 0;

	if (res->flags & IORESOURCE_IO)
		offset = hose->io_space->start;
	else if (res->flags & IORESOURCE_MEM)
		offset = hose->mem_space->start;

	res->start = region->start + offset;
	res->end = region->end + offset;
}

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(pcibios_bus_to_resource);
#endif
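
/* Nothing Alpha-specific to do here beyond enabling the resources
   the caller asked for.  */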
int
pcibios_enable_device(struct pci_dev *dev, int mask)
{
	return pci_enable_resources(dev, mask);
}

/*
 *  If we set up a device for bus mastering, we need to check the latency
 *  timer as certain firmware forgets to set it properly, as seen
 *  on SX164 and LX164 with SRM.
 */
void
pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;
	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
	if (lat >= 16) return;
	printk("PCI: Setting latency timer of device %s to 64\n",
							pci_name(dev));
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
}
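
/* Claim the resources the console already programmed (in probe-only
   mode) as well as any fixed resources, so the allocator will not
   hand them out again; recurses into all child buses.  */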
void __init
pcibios_claim_one_bus(struct pci_bus *b)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &b->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;
			if (pci_probe_only || (r->flags & IORESOURCE_PCI_FIXED))
				pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &b->children, node)
		pcibios_claim_one_bus(child_bus);
}

static void __init
pcibios_claim_console_setup(void)
{
	struct pci_bus *b;

	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_claim_one_bus(b);
}
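
/* Generic PCI bring-up: scan every recorded hose, claim whatever the
   console set up, assign the remaining resources and route
   interrupts.  Used as the machine-vector init_pci hook by most
   platforms.  */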
void __init
common_init_pci(void)
{
	struct pci_controller *hose;
	struct pci_bus *bus;
	int next_busno;
	int need_domain_info = 0;

	/* Scan all of the recorded PCI controllers.  */
	for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
		bus = pci_scan_bus(next_busno, alpha_mv.pci_ops, hose);
		hose->bus = bus;
		hose->need_domain_info = need_domain_info;
		next_busno = bus->subordinate + 1;
		/* Don't allow 8-bit bus number overflow inside the hose -
		   reserve some space for bridges. */
		if (next_busno > 224) {
			next_busno = 0;
			need_domain_info = 1;
		}
	}

	pcibios_claim_console_setup();

	pci_assign_unassigned_resources();
	pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq);
}
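
/* Hose descriptors come from bootmem: they are created during early
   machine setup, before the normal allocators are available, and are
   appended to the global hose list, never to be freed.  */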
struct pci_controller * __init
alloc_pci_controller(void)
{
	struct pci_controller *hose;

	hose = alloc_bootmem(sizeof(*hose));

	*hose_tail = hose;
	hose_tail = &hose->next;

	return hose;
}
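
/* Likewise for resource structs needed at the same early stage.  */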
struct resource * __init
alloc_resource(void)
{
	struct resource *res;

	res = alloc_bootmem(sizeof(*res));

	return res;
}


/* Provide information on locations of various I/O regions in physical
   memory.  Do this on a per-card basis so that we choose the right hose.  */

asmlinkage long
sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
{
	struct pci_controller *hose;
	struct pci_dev *dev;

	/* from hose or from bus.devfn */
	if (which & IOBASE_FROM_HOSE) {
		for (hose = hose_head; hose; hose = hose->next)
			if (hose->index == bus)
				break;
		if (!hose)
			return -ENODEV;
	} else {
		/* Special hook for ISA access.  */
		if (bus == 0 && dfn == 0) {
			hose = pci_isa_hose;
		} else {
			dev = pci_get_bus_and_slot(bus, dfn);
			if (!dev)
				return -ENODEV;
			hose = dev->sysdata;
			pci_dev_put(dev);
		}
	}

	switch (which & ~IOBASE_FROM_HOSE) {
	case IOBASE_HOSE:
		return hose->index;
	case IOBASE_SPARSE_MEM:
		return hose->sparse_mem_base;
	case IOBASE_DENSE_MEM:
		return hose->dense_mem_base;
	case IOBASE_SPARSE_IO:
		return hose->sparse_io_base;
	case IOBASE_DENSE_IO:
		return hose->dense_io_base;
	case IOBASE_ROOT_BUS:
		return hose->bus->number;
	}

	return -EOPNOTSUPP;
}

/* Create an __iomem token from a PCI BAR.  Copied from lib/iomap.c with
   no changes, since we don't want the other things in that object file.  */

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	resource_size_t start = pci_resource_start(dev, bar);
	resource_size_t len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len || !start)
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;
	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);
	if (flags & IORESOURCE_MEM) {
		/* Not checking IORESOURCE_CACHEABLE because alpha does
		   not distinguish between ioremap and ioremap_nocache.  */
		return ioremap(start, len);
	}
	return NULL;
}

/* Destroy that token.  Not copied from lib/iomap.c.  */

void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	if (__is_mmio(addr))
		iounmap(addr);
}

EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);

/* FIXME: Some boxes have multiple ISA bridges! */
struct pci_dev *isa_bridge;
EXPORT_SYMBOL(isa_bridge);